!596 [sync] PR-594: upgrade from upstream

* upgrade from upstream
This commit is contained in:
openeuler-sync-bot 2023-08-15 12:28:02 +00:00 committed by haozi007
parent fcbe0495eb
commit bfbf786760
12 changed files with 1431 additions and 1 deletions

View File

@ -0,0 +1,111 @@
From 6933b605d03ae7e8166bbb9826dd1eb914a9742e Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Wed, 26 Jul 2023 17:07:15 +1400
Subject: [PATCH 01/11] fix stuck health check blocking container stop bugs
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
src/daemon/modules/api/container_api.h | 1 +
.../container/health_check/health_check.c | 43 ++++++++++++++++---
2 files changed, 38 insertions(+), 6 deletions(-)
diff --git a/src/daemon/modules/api/container_api.h b/src/daemon/modules/api/container_api.h
index ed97633f..4c1dd29a 100644
--- a/src/daemon/modules/api/container_api.h
+++ b/src/daemon/modules/api/container_api.h
@@ -51,6 +51,7 @@ typedef struct health_check_manager {
health_check_monitor_status_t monitor_status;
// Used to wait for the health check minotor thread to close
bool monitor_exist;
+ pthread_t monitor_tid;
} health_check_manager_t;
typedef struct _container_state_t_ {
diff --git a/src/daemon/modules/container/health_check/health_check.c b/src/daemon/modules/container/health_check/health_check.c
index e9dcbdb9..bd75382f 100644
--- a/src/daemon/modules/container/health_check/health_check.c
+++ b/src/daemon/modules/container/health_check/health_check.c
@@ -169,15 +169,49 @@ static bool get_monitor_exist_flag(health_check_manager_t *health)
static void close_health_check_monitor(container_t *cont)
{
+ int64_t timeout = 0;
+ /* wait 1 second to cancel monitor thread (2000 * 500 µs) */
+ int64_t retries = 2000;
+ int ret = -1;
+
if (cont == NULL || cont->health_check == NULL) {
return;
}
+ pthread_t monitor_tid = cont->health_check->monitor_tid;
set_monitor_stop_status(cont->health_check);
// ensure that the monitor process exits
while (get_monitor_exist_flag(cont->health_check)) {
util_usleep_nointerupt(500);
+ timeout += 1;
+ if (timeout <= retries) {
+ continue;
+ }
+ if (monitor_tid <= 0) {
+ break;
+ }
+ DEBUG("Try to cancel monitor thread");
+ ret = pthread_cancel(monitor_tid);
+ if (ret != 0 && ret != ESRCH) {
+ WARN("Failed to cancel monitor thread, try to kill thread");
+ pthread_kill(monitor_tid, SIGKILL);
+ }
+ break;
}
+
+ if (monitor_tid > 0 && pthread_join(monitor_tid, NULL) != 0) {
+ ERROR("Failed to join monitor thread");
+ }
+
+ // monitor_tid = 0: it corresponds to the initialization of the health check thread when starting the container.
+ // At this time, the purpose is to stop the health check thread process before starting a new health check thread,
+ // and there is no need to set the health check status.
+ if (monitor_tid > 0) {
+ set_health_status(cont, UNHEALTHY);
+ set_monitor_exist_flag(cont->health_check, false);
+ }
+
+ cont->health_check->monitor_tid = 0;
}
// Called when the container is being stopped (whether because the health check is
@@ -228,6 +262,8 @@ static health_check_manager_t *health_check_manager_new()
health_check->monitor_exist = false;
+ health_check->monitor_tid = 0;
+
return health_check;
cleanup:
health_check_manager_free(health_check);
@@ -887,20 +923,15 @@ void container_update_health_monitor(const char *container_id)
want_running = container_is_running(cont->state) && !container_is_paused(cont->state) && probe != HEALTH_NONE;
if (want_running) {
- pthread_t monitor_tid = { 0 };
char *cid = util_strdup_s(container_id);
// ensured that the health check monitor process is stopped
close_health_check_monitor(cont);
init_monitor_idle_status(cont->health_check);
- if (pthread_create(&monitor_tid, NULL, health_check_monitor, (void *)cid)) {
+ if (pthread_create(&cont->health_check->monitor_tid, NULL, health_check_monitor, (void *)cid)) {
free(cid);
ERROR("Failed to create thread to monitor health check...");
goto out;
}
- if (pthread_detach(monitor_tid)) {
- ERROR("Failed to detach the health check monitor thread");
- goto out;
- }
} else {
close_health_check_monitor(cont);
}
--
2.25.1

View File

@ -0,0 +1,225 @@
From 5ec7b92cf94a4b7eb4587763aab9f814286bfe27 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Thu, 27 Jul 2023 20:02:33 +1400
Subject: [PATCH 02/11] add health check abnormal test_cases
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
CI/install_depends.sh | 1 +
CI/pr-gateway.sh | 1 +
CI/test_cases/container_cases/health_check.sh | 90 +++++++++++++++----
3 files changed, 76 insertions(+), 16 deletions(-)
diff --git a/CI/install_depends.sh b/CI/install_depends.sh
index 88922686..df0474c7 100755
--- a/CI/install_depends.sh
+++ b/CI/install_depends.sh
@@ -91,6 +91,7 @@ check_make_status make_cni_plugins ${build_log_cni_plugins} &
cd ~
git clone https://gitee.com/src-openeuler/lxc.git
cd lxc
+git checkout origin/openEuler-22.03-LTS-SP2
tar xf lxc-4.0.3.tar.gz
cd lxc-4.0.3
mv ../*.patch .
diff --git a/CI/pr-gateway.sh b/CI/pr-gateway.sh
index b3da52d0..8223ddf5 100755
--- a/CI/pr-gateway.sh
+++ b/CI/pr-gateway.sh
@@ -35,6 +35,7 @@ cd ~
rm -rf lxc
git clone https://gitee.com/src-openeuler/lxc.git
pushd lxc
+git checkout origin/openEuler-22.03-LTS-SP2
rm -rf lxc-4.0.3
./apply-patches || exit 1
pushd lxc-4.0.3
diff --git a/CI/test_cases/container_cases/health_check.sh b/CI/test_cases/container_cases/health_check.sh
index efb357e0..5e6b5641 100755
--- a/CI/test_cases/container_cases/health_check.sh
+++ b/CI/test_cases/container_cases/health_check.sh
@@ -43,18 +43,30 @@ function do_retry()
return 1
}
-function inspect_container_status()
+function inspect_container_health_status()
{
[[ $(isula inspect -f '{{.State.Health.Status}}' ${1}) == "${2}" ]]
return $?
}
+function inspect_container_status()
+{
+ [[ $(isula inspect -f '{{.State.Status}}' ${1}) == "${2}" ]]
+ return $?
+}
+
function inspect_container_exitcode()
{
[[ $(isula inspect -f '{{.State.ExitCode}}' ${1}) == "${2}" ]]
return $?
}
+function inspect_container_failing_streak()
+{
+ [[ $(isula inspect -f '{{.State.Health.FailingStreak}}' ${1}) == "${2}" ]]
+ return $?
+}
+
function test_health_check_paraments()
{
local ret=0
@@ -73,29 +85,29 @@ function test_health_check_paraments()
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to run container with image: ${image}" && ((ret++))
# start period : 2s => do health check => interval: 2s => do health check => exit on unhealthy
- [[ $(isula inspect -f '{{.State.Status}}' ${container_name}) == "running" ]]
+ do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} running
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not running" && ((ret++))
# finish first health check
sleep 10
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} starting
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} starting
# keep starting status with health check return non-zero at always until status change to unhealthy
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not starting" && ((ret++))
sleep 6 # finish second health check
success=1
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} unhealthy
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} unhealthy
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not unhealthy" && ((ret++))
# validate --health-retries option
- [[ $(isula inspect -f '{{.State.Health.FailingStreak}}' ${container_name}) == "2" ]]
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not unhealthy" && ((ret++))
+ do_retry ${retry_limit} ${retry_interval} inspect_container_failing_streak ${container_name} 2
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check failing streak: not 2" && ((ret++))
- [[ $(isula inspect -f '{{.State.Status}}' ${container_name}) == "exited" ]]
+ do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} exited
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not exited" && ((ret++))
- [[ $(isula inspect -f '{{.State.ExitCode}}' ${container_name}) == "137" ]]
+ do_retry ${retry_limit} ${retry_interval} inspect_container_exitcode ${container_name} 137
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container exit code: not 137" && ((ret++))
isula rm -f ${container_name}
@@ -123,24 +135,24 @@ function test_health_check_normally()
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to run container with image: ${image}" && ((ret++))
# start period : 0s => interval: 2s => do health check => interval: 2s => do health check => ...
- [[ $(isula inspect -f '{{.State.Status}}' ${container_name}) == "running" ]]
+ do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} running
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not running" && ((ret++))
# Health check has been performed yet
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} starting
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} starting
# Initial status when the container is still starting
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not starting" && ((ret++))
sleep 8 # finish first health check
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} healthy
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} healthy
# When the health check returns successfully, status immediately becomes healthy
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not healthy" && ((ret++))
kill -9 $(isula inspect -f '{{.State.Pid}}' ${container_name})
# Wait for the container to be killed
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} unhealthy
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} unhealthy
# The container process exits abnormally and the health check status becomes unhealthy
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not unhealthy" && ((ret++))
@@ -174,17 +186,17 @@ function test_health_check_timeout()
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to run container with image: ${image}" && ((ret++))
# start period : 0s => interval: 5s => do health check(1s timeout) => unhealthy(exited)
- [[ $(isula inspect -f '{{.State.Status}}' ${container_name}) == "running" ]]
+ do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} running
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not running" && ((ret++))
# Health check has been performed yet
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} starting
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} starting
# Initial status when the container is still starting
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not starting" && ((ret++))
sleep 7 # finish first health check
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} unhealthy
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} unhealthy
# The container process exits and the health check status becomes unhealthy
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not unhealthy" && ((ret++))
@@ -229,6 +241,50 @@ function test_health_check_monitor()
return ${ret}
}
+function test_health_check_abnormal()
+{
+ local ret=0
+ local image="busybox"
+ local retry_limit=10
+ local retry_interval=1
+ local test="health check abnormal => (${FUNCNAME[@]})"
+
+ msg_info "${test} starting..."
+
+ isula images | grep ${image}
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - missing list image: ${image}" && ((ret++))
+
+ container_name="health_check_abnormal"
+ isula run -itd --runtime $1 -n ${container_name} --health-cmd="sleep 999" --health-timeout=1000000s --health-retries=1 ${image} /bin/sh
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to run container with image: ${image}" && ((ret++))
+
+ do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} running
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not running" && ((ret++))
+
+ # Health check has been performed yet
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} starting
+ # Initial status when the container is still starting
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not starting" && ((ret++))
+
+ sleep 30 # wait health check exec
+
+ isula stop -t 0 ${container_name} &
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to stop container: ${container_name}" && ((ret++))
+
+ do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} exited
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not exited" && ((ret++))
+
+ do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} unhealthy
+ # The container process exits and the health check status becomes unhealthy
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not unhealthy" && ((ret++))
+
+ isula rm -f ${container_name}
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to remove container: ${container_name}" && ((ret++))
+
+ msg_info "${test} finished with return ${ret}..."
+ return ${ret}
+}
+
declare -i ans=0
for element in ${RUNTIME_LIST[@]};
@@ -244,7 +300,9 @@ do
test_health_check_monitor $element || ((ans++))
+ test_health_check_abnormal $element || ((ans++))
+
msg_info "${test} finished with return ${ans}..."
done
-show_result ${ans} "${curr_path}/${0}"
+show_result ${ans} "${curr_path}/${0}"
\ No newline at end of file
--
2.25.1

View File

@ -0,0 +1,111 @@
From e9f2f6e399c9d232bd57cbee98068236f8f8be91 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Fri, 28 Jul 2023 16:39:57 +1400
Subject: [PATCH 03/11] restore health check fix
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
src/daemon/modules/api/container_api.h | 1 -
.../container/health_check/health_check.c | 43 +++----------------
2 files changed, 6 insertions(+), 38 deletions(-)
diff --git a/src/daemon/modules/api/container_api.h b/src/daemon/modules/api/container_api.h
index 4c1dd29a..ed97633f 100644
--- a/src/daemon/modules/api/container_api.h
+++ b/src/daemon/modules/api/container_api.h
@@ -51,7 +51,6 @@ typedef struct health_check_manager {
health_check_monitor_status_t monitor_status;
// Used to wait for the health check minotor thread to close
bool monitor_exist;
- pthread_t monitor_tid;
} health_check_manager_t;
typedef struct _container_state_t_ {
diff --git a/src/daemon/modules/container/health_check/health_check.c b/src/daemon/modules/container/health_check/health_check.c
index bd75382f..e9dcbdb9 100644
--- a/src/daemon/modules/container/health_check/health_check.c
+++ b/src/daemon/modules/container/health_check/health_check.c
@@ -169,49 +169,15 @@ static bool get_monitor_exist_flag(health_check_manager_t *health)
static void close_health_check_monitor(container_t *cont)
{
- int64_t timeout = 0;
- /* wait 1 second to cancel monitor thread (2000 * 500 µs) */
- int64_t retries = 2000;
- int ret = -1;
-
if (cont == NULL || cont->health_check == NULL) {
return;
}
- pthread_t monitor_tid = cont->health_check->monitor_tid;
set_monitor_stop_status(cont->health_check);
// ensure that the monitor process exits
while (get_monitor_exist_flag(cont->health_check)) {
util_usleep_nointerupt(500);
- timeout += 1;
- if (timeout <= retries) {
- continue;
- }
- if (monitor_tid <= 0) {
- break;
- }
- DEBUG("Try to cancel monitor thread");
- ret = pthread_cancel(monitor_tid);
- if (ret != 0 && ret != ESRCH) {
- WARN("Failed to cancel monitor thread, try to kill thread");
- pthread_kill(monitor_tid, SIGKILL);
- }
- break;
}
-
- if (monitor_tid > 0 && pthread_join(monitor_tid, NULL) != 0) {
- ERROR("Failed to join monitor thread");
- }
-
- // monitor_tid = 0: it corresponds to the initialization of the health check thread when starting the container.
- // At this time, the purpose is to stop the health check thread process before starting a new health check thread,
- // and there is no need to set the health check status.
- if (monitor_tid > 0) {
- set_health_status(cont, UNHEALTHY);
- set_monitor_exist_flag(cont->health_check, false);
- }
-
- cont->health_check->monitor_tid = 0;
}
// Called when the container is being stopped (whether because the health check is
@@ -262,8 +228,6 @@ static health_check_manager_t *health_check_manager_new()
health_check->monitor_exist = false;
- health_check->monitor_tid = 0;
-
return health_check;
cleanup:
health_check_manager_free(health_check);
@@ -923,15 +887,20 @@ void container_update_health_monitor(const char *container_id)
want_running = container_is_running(cont->state) && !container_is_paused(cont->state) && probe != HEALTH_NONE;
if (want_running) {
+ pthread_t monitor_tid = { 0 };
char *cid = util_strdup_s(container_id);
// ensured that the health check monitor process is stopped
close_health_check_monitor(cont);
init_monitor_idle_status(cont->health_check);
- if (pthread_create(&cont->health_check->monitor_tid, NULL, health_check_monitor, (void *)cid)) {
+ if (pthread_create(&monitor_tid, NULL, health_check_monitor, (void *)cid)) {
free(cid);
ERROR("Failed to create thread to monitor health check...");
goto out;
}
+ if (pthread_detach(monitor_tid)) {
+ ERROR("Failed to detach the health check monitor thread");
+ goto out;
+ }
} else {
close_health_check_monitor(cont);
}
--
2.25.1

View File

@ -0,0 +1,77 @@
From f5e249b6733a88e3ac8872fd5013e418f902683f Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Fri, 28 Jul 2023 16:40:09 +1400
Subject: [PATCH 04/11] remove health check abnormal test_cases
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
CI/test_cases/container_cases/health_check.sh | 46 -------------------
1 file changed, 46 deletions(-)
diff --git a/CI/test_cases/container_cases/health_check.sh b/CI/test_cases/container_cases/health_check.sh
index 5e6b5641..4971ff05 100755
--- a/CI/test_cases/container_cases/health_check.sh
+++ b/CI/test_cases/container_cases/health_check.sh
@@ -241,50 +241,6 @@ function test_health_check_monitor()
return ${ret}
}
-function test_health_check_abnormal()
-{
- local ret=0
- local image="busybox"
- local retry_limit=10
- local retry_interval=1
- local test="health check abnormal => (${FUNCNAME[@]})"
-
- msg_info "${test} starting..."
-
- isula images | grep ${image}
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - missing list image: ${image}" && ((ret++))
-
- container_name="health_check_abnormal"
- isula run -itd --runtime $1 -n ${container_name} --health-cmd="sleep 999" --health-timeout=1000000s --health-retries=1 ${image} /bin/sh
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to run container with image: ${image}" && ((ret++))
-
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} running
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not running" && ((ret++))
-
- # Health check has been performed yet
- do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} starting
- # Initial status when the container is still starting
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not starting" && ((ret++))
-
- sleep 30 # wait health check exec
-
- isula stop -t 0 ${container_name} &
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to stop container: ${container_name}" && ((ret++))
-
- do_retry ${retry_limit} ${retry_interval} inspect_container_status ${container_name} exited
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container status: not exited" && ((ret++))
-
- do_retry ${retry_limit} ${retry_interval} inspect_container_health_status ${container_name} unhealthy
- # The container process exits and the health check status becomes unhealthy
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - incorrent container health check status: not unhealthy" && ((ret++))
-
- isula rm -f ${container_name}
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to remove container: ${container_name}" && ((ret++))
-
- msg_info "${test} finished with return ${ret}..."
- return ${ret}
-}
-
declare -i ans=0
for element in ${RUNTIME_LIST[@]};
@@ -300,8 +256,6 @@ do
test_health_check_monitor $element || ((ans++))
- test_health_check_abnormal $element || ((ans++))
-
msg_info "${test} finished with return ${ans}..."
done
--
2.25.1

View File

@ -0,0 +1,27 @@
From 932c8e83324d03b0088bf8aac2a584ca7b163f69 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Tue, 8 Aug 2023 15:08:45 +1400
Subject: [PATCH 05/11] bugfix for implicit_digest
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
src/daemon/modules/image/oci/storage/image_store/image_store.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/daemon/modules/image/oci/storage/image_store/image_store.c b/src/daemon/modules/image/oci/storage/image_store/image_store.c
index daf08c85..d1b7e402 100644
--- a/src/daemon/modules/image/oci/storage/image_store/image_store.c
+++ b/src/daemon/modules/image/oci/storage/image_store/image_store.c
@@ -2818,7 +2818,8 @@ static int implicit_digest(map_t *digests, image_t *img)
return 0;
}
- if (get_index_by_key((const char **)img->simage->big_data_digests->keys, img->simage->big_data_digests->len,
+ // Check whether the manifest digest exists in big_data_digests; if not, return 0 directly
+ if (!get_index_by_key((const char **)img->simage->big_data_digests->keys, img->simage->big_data_digests->len,
IMAGE_DIGEST_BIG_DATA_KEY, &index)) {
return 0;
}
--
2.25.1

View File

@ -0,0 +1,66 @@
From c386fe42814ea7755dc67a6d60d9cf6525179c19 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Tue, 8 Aug 2023 16:08:32 +1400
Subject: [PATCH 06/11] add testcases for inspect images by digest after
restarting isulad
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
CI/test_cases/image_cases/image_digest.sh | 28 ++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/CI/test_cases/image_cases/image_digest.sh b/CI/test_cases/image_cases/image_digest.sh
index a7cb594a..e30f29f0 100755
--- a/CI/test_cases/image_cases/image_digest.sh
+++ b/CI/test_cases/image_cases/image_digest.sh
@@ -26,6 +26,7 @@ function test_image_with_digest()
{
local ret=0
local image="busybox"
+ local image2="ubuntu"
local image_digest="busybox@sha256:5cd3db04b8be5773388576a83177aff4f40a03457a63855f4b9cbe30542b9a43"
local test="pull && inspect && tag image with digest test => (${FUNCNAME[@]})"
@@ -61,15 +62,36 @@ function test_image_with_digest()
isula rm -f test
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to rm container" && ((ret++))
- isula inspect -f '{{.image.repo_tags}}' ${image} | grep "${image}:latest"
- [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - invalid image repo tags: ${image}" && ((ret++))
-
isula rmi ${image_digest}
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to remove image ${image_digest}" && ((ret++))
isula rmi ${image}:digest_test
[[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to remove image ${image}:digest_test" && ((ret++))
+ isula inspect -f '{{.image.repo_tags}}' ${image_digest} | grep "${image}:digest_test"
+ [[ $? -eq 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - image digest delete error: ${image_digest}" && ((ret++))
+
+ isula pull docker.io/library/${image2}:latest
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to pull image: ${image2}" && return ${FAILURE}
+
+ digest=$(isula inspect "${image2}:latest" | grep "@sha256" | awk -F"\"" '{print $2}')
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to get digest for: ${image2}" && return ${FAILURE}
+
+ isula inspect $digest
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to inspect ${image2} by digest" && return ${FAILURE}
+
+ check_valgrind_log
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - stop isulad failed" && ((ret++))
+
+ start_isulad_with_valgrind
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - start isulad failed" && ((ret++))
+
+ isula inspect $digest
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to inspect ${image2} by digest" && return ${FAILURE}
+
+ isula rmi ${image2}:latest
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - failed to remove image ${image2}" && ((ret++))
+
msg_info "${test} finished with return ${ret}..."
return ${ret}
}
--
2.25.1

View File

@ -0,0 +1,546 @@
From e44b2fc5ee8c8b9ca577265e88ae3518828cf8a0 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Tue, 8 Aug 2023 06:50:07 +0000
Subject: [PATCH 07/11] !2095 fix loading of nsswitch based config inside
chroot under glibc * fix loading of nsswitch based config inside chroot under
glibc
---
cmake/checker.cmake | 6 +
src/CMakeLists.txt | 2 +-
src/cmd/isulad/main.c | 3 +
src/daemon/modules/api/leftover_cleanup_api.h | 2 +
.../container/leftover_cleanup/cleanup.c | 80 +++++++
.../container/leftover_cleanup/cleanup.h | 1 +
.../leftover_cleanup/leftover_cleanup_api.c | 4 +
src/utils/tar/util_archive.c | 217 ++++++++++++++++--
8 files changed, 299 insertions(+), 16 deletions(-)
diff --git a/cmake/checker.cmake b/cmake/checker.cmake
index fea4f925..000c5e0c 100644
--- a/cmake/checker.cmake
+++ b/cmake/checker.cmake
@@ -30,6 +30,12 @@ else()
message("-- found linux capability.h --- no")
endif()
+# check libcapability
+pkg_check_modules(PC_LIBCAP REQUIRED "libcap")
+find_library(CAP_LIBRARY cap
+ HINTS ${PC_LIBCAP_LIBDIR} ${PC_CAP_LIBRARY_DIRS})
+_CHECK(CAP_LIBRARY "CAP_LIBRARY-NOTFOUND" "libcap.so")
+
# check zlib
pkg_check_modules(PC_ZLIB "zlib>=1.2.8")
find_path(ZLIB_INCLUDE_DIR zlib.h
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 02d7b13f..7201a030 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -48,7 +48,7 @@ target_include_directories(libisulad_tools
PUBLIC ${ISULA_LIBUTILS_INCLUDE_DIR}
)
set_target_properties(libisulad_tools PROPERTIES PREFIX "")
-target_link_libraries(libisulad_tools ${ZLIB_LIBRARY} ${ISULA_LIBUTILS_LIBRARY} ${CRYPTO_LIBRARY})
+target_link_libraries(libisulad_tools ${ZLIB_LIBRARY} ${ISULA_LIBUTILS_LIBRARY} ${CRYPTO_LIBRARY} ${CAP_LIBRARY})
if (ENABLE_OCI_IMAGE)
target_link_libraries(libisulad_tools ${LIBARCHIVE_LIBRARY})
diff --git a/src/cmd/isulad/main.c b/src/cmd/isulad/main.c
index 0cb7b50e..1e51a8e7 100644
--- a/src/cmd/isulad/main.c
+++ b/src/cmd/isulad/main.c
@@ -1254,6 +1254,9 @@ static int isulad_server_init_common()
goto out;
}
#endif
+ // clean tmpdir before image module init,
+ // because removing tmpdir will fail if a chroot mount point remains under it
+ isulad_tmpdir_cleaner();
if (volume_init(args->json_confs->graph) != 0) {
ERROR("Failed to init volume");
diff --git a/src/daemon/modules/api/leftover_cleanup_api.h b/src/daemon/modules/api/leftover_cleanup_api.h
index 26c4509b..0a8f9e0e 100644
--- a/src/daemon/modules/api/leftover_cleanup_api.h
+++ b/src/daemon/modules/api/leftover_cleanup_api.h
@@ -31,6 +31,8 @@ void clean_module_fill_ctx(cleanup_ctx_data_t data_type, void *data);
void clean_module_do_clean();
+void isulad_tmpdir_cleaner(void);
+
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
diff --git a/src/daemon/modules/container/leftover_cleanup/cleanup.c b/src/daemon/modules/container/leftover_cleanup/cleanup.c
index 664988b5..eb9b5afb 100644
--- a/src/daemon/modules/container/leftover_cleanup/cleanup.c
+++ b/src/daemon/modules/container/leftover_cleanup/cleanup.c
@@ -12,7 +12,11 @@
* Create: 2022-10-31
* Description: provide cleanup functions
*********************************************************************************/
+#include <sys/mount.h>
+
#include "utils.h"
+#include "utils_fs.h"
+#include "path.h"
#include "cleanup.h"
#include "oci_rootfs_clean.h"
@@ -132,3 +136,79 @@ void cleaners_do_clean(struct cleaners *clns, struct clean_ctx *ctx)
}
}
}
+
+// always returns true;
+// if umount/remove fails, just ignore it
+static bool walk_isulad_tmpdir_cb(const char *path_name, const struct dirent *sub_dir, void *context)
+{
+ int nret = 0;
+ char tmpdir[PATH_MAX] = { 0 };
+ const char *chroot_prefix = "tar-chroot-";
+
+ if (sub_dir == NULL || !util_has_prefix(sub_dir->d_name, chroot_prefix)) {
+ // only umount/remove chroot directory
+ return true;
+ }
+
+ nret = snprintf(tmpdir, PATH_MAX, "%s/%s", path_name, sub_dir->d_name);
+ if (nret < 0 || nret >= PATH_MAX) {
+ WARN("Failed to snprintf for %s", sub_dir->d_name);
+ return true;
+ }
+
+ if (util_detect_mounted(tmpdir)) {
+ if (umount(tmpdir) != 0) {
+ ERROR("Failed to umount target %s, error: %s", tmpdir, strerror(errno));
+ }
+ }
+
+ if (util_path_remove(tmpdir) != 0) {
+ WARN("Failed to remove path %s", tmpdir);
+ }
+
+ return true;
+}
+
+static void cleanup_path(char *dir)
+{
+ int nret;
+ char tmp_dir[PATH_MAX] = { 0 };
+ char cleanpath[PATH_MAX] = { 0 };
+
+ nret = snprintf(tmp_dir, PATH_MAX, "%s/isulad_tmpdir", dir);
+ if (nret < 0 || nret >= PATH_MAX) {
+ ERROR("Failed to snprintf");
+ return;
+ }
+
+ if (util_clean_path(tmp_dir, cleanpath, sizeof(cleanpath)) == NULL) {
+ ERROR("clean path for %s failed", tmp_dir);
+ return;
+ }
+
+ if (!util_dir_exists(cleanpath)) {
+ return;
+ }
+
+ nret = util_scan_subdirs(cleanpath, walk_isulad_tmpdir_cb, NULL);
+ if (nret != 0) {
+ ERROR("failed to scan isulad tmp subdirs");
+ }
+}
+
+// try to umount/remove isulad_tmpdir/tar-chroot-XXX directory
+// ignore return value
+void do_isulad_tmpdir_cleaner(void)
+{
+ char *isula_tmp_dir = NULL;
+
+ isula_tmp_dir = getenv("ISULAD_TMPDIR");
+ if (util_valid_str(isula_tmp_dir)) {
+ cleanup_path(isula_tmp_dir);
+ }
+ // No matter whether ISULAD_TMPDIR is set or not,
+ // clean up the "/tmp" directory to prevent the mount point from remaining
+ cleanup_path("/tmp");
+
+ return;
+}
diff --git a/src/daemon/modules/container/leftover_cleanup/cleanup.h b/src/daemon/modules/container/leftover_cleanup/cleanup.h
index 8dd5e9bd..7ad124f4 100644
--- a/src/daemon/modules/container/leftover_cleanup/cleanup.h
+++ b/src/daemon/modules/container/leftover_cleanup/cleanup.h
@@ -45,6 +45,7 @@ void destroy_cleaners(struct cleaners *clns);
void cleaners_do_clean(struct cleaners *clns, struct clean_ctx *ctx);
+void do_isulad_tmpdir_cleaner(void);
#if defined(__cplusplus) || defined(c_plusplus)
}
diff --git a/src/daemon/modules/container/leftover_cleanup/leftover_cleanup_api.c b/src/daemon/modules/container/leftover_cleanup/leftover_cleanup_api.c
index a20dbc3a..fc5b55e1 100644
--- a/src/daemon/modules/container/leftover_cleanup/leftover_cleanup_api.c
+++ b/src/daemon/modules/container/leftover_cleanup/leftover_cleanup_api.c
@@ -76,3 +76,7 @@ void clean_module_do_clean()
g_clean_ctx = NULL;
}
+void isulad_tmpdir_cleaner(void)
+{
+ do_isulad_tmpdir_cleaner();
+}
diff --git a/src/utils/tar/util_archive.c b/src/utils/tar/util_archive.c
index 630ad8f8..c72e63b8 100644
--- a/src/utils/tar/util_archive.c
+++ b/src/utils/tar/util_archive.c
@@ -27,6 +27,10 @@
#include <stdarg.h>
#include <stdint.h>
#include <libgen.h>
+#include <pwd.h>
+#include <netdb.h>
+#include <sys/mount.h>
+#include <sys/capability.h>
#include "stdbool.h"
#include "utils.h"
@@ -54,6 +58,7 @@ struct archive_context {
int stdout_fd;
int stderr_fd;
pid_t pid;
+ char *safe_dir;
};
struct archive_content_data {
@@ -72,6 +77,123 @@ ssize_t read_content(struct archive *a, void *client_data, const void **buff)
return mydata->content->read(mydata->content->context, mydata->buff, sizeof(mydata->buff));
}
+static void do_disable_unneccessary_caps()
+{
+ cap_t caps;
+ caps = cap_get_proc();
+ if (caps == NULL) {
+ SYSERROR("Failed to do get cap");
+ return;
+ }
+ cap_value_t cap_list[] = { CAP_SETUID };
+ // clear all capabilities
+ cap_clear(caps);
+
+ if (cap_set_flag(caps, CAP_EFFECTIVE, sizeof(cap_list) / sizeof(cap_value_t), cap_list, CAP_SET) != 0) {
+ SYSERROR("Failed to clear caps");
+ return;
+ }
+
+ cap_set_proc(caps);
+ cap_free(caps);
+}
+
+static int make_safedir_is_noexec(const char *dstdir, char **safe_dir)
+{
+ struct stat buf;
+ char *isulad_tmpdir_env = NULL;
+ char isula_tmpdir[PATH_MAX] = { 0 };
+ char cleanpath[PATH_MAX] = { 0 };
+ char tmp_dir[PATH_MAX] = { 0 };
+ int nret;
+
+ isulad_tmpdir_env = getenv("ISULAD_TMPDIR");
+ if (!util_valid_str(isulad_tmpdir_env)) {
+ // if ISULAD_TMPDIR is not set, just use /tmp
+ isulad_tmpdir_env = "/tmp";
+ }
+
+ nret = snprintf(isula_tmpdir, PATH_MAX, "%s/isulad_tmpdir", isulad_tmpdir_env);
+ if (nret < 0 || nret >= PATH_MAX) {
+ ERROR("Failed to snprintf");
+ return -1;
+ }
+
+ if (util_clean_path(isula_tmpdir, cleanpath, sizeof(cleanpath)) == NULL) {
+ ERROR("clean path for %s failed", isula_tmpdir);
+ return -1;
+ }
+
+ nret = snprintf(tmp_dir, PATH_MAX, "%s/tar-chroot-XXXXXX", cleanpath);
+ if (nret < 0 || nret >= PATH_MAX) {
+ ERROR("Failed to snprintf string");
+ return -1;
+ }
+
+ if (stat(dstdir, &buf) < 0) {
+ SYSERROR("Check chroot dir failed");
+ return -1;
+ }
+
+ // ensure that the parent dir exists
+ if (util_mkdir_p(cleanpath, buf.st_mode) != 0) {
+ return -1;
+ }
+
+ if (mkdtemp(tmp_dir) == NULL) {
+ SYSERROR("Create temp dir failed");
+ return -1;
+ }
+
+ // ensure the mode of the new safe dir is the same as dstdir
+ if (util_mkdir_p(tmp_dir, buf.st_mode) != 0) {
+ return -1;
+ }
+
+ if (mount(dstdir, tmp_dir, "none", MS_BIND, NULL) != 0) {
+ SYSERROR("Mount safe dir failed");
+ if (util_path_remove(tmp_dir) != 0) {
+ ERROR("Failed to remove path %s", tmp_dir);
+ }
+ return -1;
+ }
+
+ if (mount(tmp_dir, tmp_dir, "none", MS_BIND | MS_REMOUNT | MS_NOEXEC, NULL) != 0) {
+ SYSERROR("Mount safe dir failed");
+ if (umount(tmp_dir) != 0) {
+ ERROR("Failed to umount target %s", tmp_dir);
+ }
+ if (util_path_remove(tmp_dir) != 0) {
+ ERROR("Failed to remove path %s", tmp_dir);
+ }
+ return -1;
+ }
+
+ *safe_dir = util_strdup_s(tmp_dir);
+ return 0;
+}
+
+// fix loading of nsswitch-based config inside chroot under glibc
+static int do_safe_chroot(const char *dstdir)
+{
+ // don't call getpwnam here,
+ // because it would change files owned by the nobody uid/gid that were copied from host to container
+ // if the nobody uid/gid differs between host and container
+
+ // set No New Privileges
+ prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+
+ if (chroot(dstdir) != 0) {
+ SYSERROR("Failed to chroot to %s", dstdir);
+ fprintf(stderr, "Failed to chroot to %s: %s", dstdir, strerror(errno));
+ return -1;
+ }
+
+ do_disable_unneccessary_caps();
+
+ return 0;
+}
+
static bool overlay_whiteout_convert_read(struct archive_entry *entry, const char *dst_path, map_t *unpacked_path_map)
{
bool do_write = true;
@@ -597,6 +719,12 @@ int archive_unpack(const struct io_read_wrapper *content, const char *dstdir, co
int keepfds[] = { -1, -1, -1 };
int pipe_stderr[2] = { -1, -1 };
char errbuf[BUFSIZ + 1] = { 0 };
+ char *safe_dir = NULL;
+
+ if (make_safedir_is_noexec(dstdir, &safe_dir) != 0) {
+ ERROR("Prepare safe dir failed");
+ return -1;
+ }
if (pipe2(pipe_stderr, O_CLOEXEC) != 0) {
ERROR("Failed to create pipe");
@@ -629,9 +757,9 @@ int archive_unpack(const struct io_read_wrapper *content, const char *dstdir, co
goto child_out;
}
- if (chroot(dstdir) != 0) {
- SYSERROR("Failed to chroot to %s", dstdir);
- fprintf(stderr, "Failed to chroot to %s: %s", dstdir, strerror(errno));
+ if (do_safe_chroot(safe_dir) != 0) {
+ SYSERROR("Failed to chroot to %s", safe_dir);
+ fprintf(stderr, "Failed to chroot to %s: %s", safe_dir, strerror(errno));
ret = -1;
goto child_out;
}
@@ -669,6 +797,13 @@ cleanup:
if (errmsg != NULL && strlen(errbuf) != 0) {
*errmsg = util_strdup_s(errbuf);
}
+ if (umount(safe_dir) != 0) {
+ ERROR("Failed to umount target %s", safe_dir);
+ }
+ if (util_path_remove(safe_dir) != 0) {
+ ERROR("Failed to remove path %s", safe_dir);
+ }
+ free(safe_dir);
return ret;
}
@@ -982,6 +1117,12 @@ int archive_chroot_tar(char *path, char *file, char **errmsg)
int keepfds[] = { -1, -1 };
char errbuf[BUFSIZ + 1] = { 0 };
int fd = 0;
+ char *safe_dir = NULL;
+
+ if (make_safedir_is_noexec(path, &safe_dir) != 0) {
+ ERROR("Prepare safe dir failed");
+ return -1;
+ }
if (pipe2(pipe_for_read, O_CLOEXEC) != 0) {
ERROR("Failed to create pipe");
@@ -1021,9 +1162,9 @@ int archive_chroot_tar(char *path, char *file, char **errmsg)
goto child_out;
}
- if (chroot(path) != 0) {
- ERROR("Failed to chroot to %s", path);
- fprintf(stderr, "Failed to chroot to %s\n", path);
+ if (do_safe_chroot(safe_dir) != 0) {
+ ERROR("Failed to chroot to %s", safe_dir);
+ fprintf(stderr, "Failed to chroot to %s\n", safe_dir);
ret = -1;
goto child_out;
}
@@ -1064,7 +1205,13 @@ cleanup:
if (errmsg != NULL && strlen(errbuf) != 0) {
*errmsg = util_strdup_s(errbuf);
}
-
+ if (umount(safe_dir) != 0) {
+ ERROR("Failed to umount target %s", safe_dir);
+ }
+ if (util_path_remove(safe_dir) != 0) {
+ ERROR("Failed to remove path %s", safe_dir);
+ }
+ free(safe_dir);
return ret;
}
@@ -1169,6 +1316,16 @@ static int archive_context_close(void *context, char **err)
ret = -1;
}
+ if (ctx->safe_dir != NULL) {
+ if (umount(ctx->safe_dir) != 0) {
+ ERROR("Failed to umount target %s", ctx->safe_dir);
+ }
+ if (util_path_remove(ctx->safe_dir) != 0) {
+ ERROR("Failed to remove path %s", ctx->safe_dir);
+ }
+ free(ctx->safe_dir);
+ ctx->safe_dir = NULL;
+ }
free(marshaled);
free(ctx);
return ret;
@@ -1192,10 +1349,10 @@ int archive_chroot_untar_stream(const struct io_read_wrapper *context, const cha
.src_base = src_base,
.dst_base = dst_base
};
+ char *safe_dir = NULL;
- buf = util_common_calloc_s(buf_len);
- if (buf == NULL) {
- ERROR("Out of memory");
+ if (make_safedir_is_noexec(chroot_dir, &safe_dir) != 0) {
+ ERROR("Prepare safe dir failed");
return -1;
}
@@ -1232,8 +1389,8 @@ int archive_chroot_untar_stream(const struct io_read_wrapper *context, const cha
goto child_out;
}
- if (chroot(chroot_dir) != 0) {
- SYSERROR("Failed to chroot to %s", chroot_dir);
+ if (do_safe_chroot(safe_dir) != 0) {
+ SYSERROR("Failed to chroot to %s", safe_dir);
ret = -1;
goto child_out;
}
@@ -1262,6 +1419,12 @@ child_out:
close(pipe_stream[0]);
pipe_stream[0] = -1;
+ buf = util_common_calloc_s(buf_len);
+ if (buf == NULL) {
+ ERROR("Out of memory");
+ goto cleanup;
+ }
+
ctx = util_common_calloc_s(sizeof(struct archive_context));
if (ctx == NULL) {
goto cleanup;
@@ -1292,6 +1455,13 @@ cleanup:
ret = (cret != 0) ? cret : ret;
close_archive_pipes_fd(pipe_stderr, 2);
close_archive_pipes_fd(pipe_stream, 2);
+ if (umount(safe_dir) != 0) {
+ ERROR("Failed to umount target %s", safe_dir);
+ }
+ if (util_path_remove(safe_dir) != 0) {
+ ERROR("Failed to remove path %s", safe_dir);
+ }
+ free(safe_dir);
return ret;
}
@@ -1306,6 +1476,12 @@ int archive_chroot_tar_stream(const char *chroot_dir, const char *tar_path, cons
int ret = -1;
pid_t pid;
struct archive_context *ctx = NULL;
+ char *safe_dir = NULL;
+
+ if (make_safedir_is_noexec(chroot_dir, &safe_dir) != 0) {
+ ERROR("Prepare safe dir failed");
+ return -1;
+ }
if (pipe(pipe_stderr) != 0) {
ERROR("Failed to create pipe: %s", strerror(errno));
@@ -1343,9 +1519,9 @@ int archive_chroot_tar_stream(const char *chroot_dir, const char *tar_path, cons
goto child_out;
}
- if (chroot(chroot_dir) != 0) {
- ERROR("Failed to chroot to %s", chroot_dir);
- fprintf(stderr, "Failed to chroot to %s\n", chroot_dir);
+ if (do_safe_chroot(safe_dir) != 0) {
+ ERROR("Failed to chroot to %s", safe_dir);
+ fprintf(stderr, "Failed to chroot to %s\n", safe_dir);
ret = -1;
goto child_out;
}
@@ -1395,6 +1571,8 @@ child_out:
ctx->stderr_fd = pipe_stderr[0];
pipe_stderr[0] = -1;
ctx->pid = pid;
+ ctx->safe_dir = safe_dir;
+ safe_dir = NULL;
reader->close = archive_context_close;
reader->context = ctx;
@@ -1406,6 +1584,15 @@ free_out:
close_archive_pipes_fd(pipe_stderr, 2);
close_archive_pipes_fd(pipe_stream, 2);
free(ctx);
+ if (safe_dir != NULL) {
+ if (umount(safe_dir) != 0) {
+ ERROR("Failed to umount target %s", safe_dir);
+ }
+ if (util_path_remove(safe_dir) != 0) {
+ ERROR("Failed to remove path %s", safe_dir);
+ }
+ free(safe_dir);
+ }
return ret;
}
--
2.25.1

View File

@ -0,0 +1,100 @@
From 0f080a7f31a388eae006b2135ddeb1d6489d643a Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Wed, 9 Aug 2023 11:08:13 +1400
Subject: [PATCH 08/11] Added restrictions on container health checks
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
.../executor/container_cb/execution_create.c | 2 +-
src/daemon/modules/spec/verify.c | 24 ++++++++++++++++---
src/daemon/modules/spec/verify.h | 2 +-
3 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/src/daemon/executor/container_cb/execution_create.c b/src/daemon/executor/container_cb/execution_create.c
index 29b5fc5e..4d10e9e0 100644
--- a/src/daemon/executor/container_cb/execution_create.c
+++ b/src/daemon/executor/container_cb/execution_create.c
@@ -1457,7 +1457,7 @@ int container_create_cb(const container_create_request *request, container_creat
goto clean_rootfs;
}
- if (verify_container_config(v2_spec->config) != 0) {
+ if (verify_container_config(v2_spec->config, runtime) != 0) {
cc = ISULAD_ERR_EXEC;
goto clean_rootfs;
}
diff --git a/src/daemon/modules/spec/verify.c b/src/daemon/modules/spec/verify.c
index fe53bb0f..64cf7f18 100644
--- a/src/daemon/modules/spec/verify.c
+++ b/src/daemon/modules/spec/verify.c
@@ -2160,7 +2160,12 @@ static inline bool is_less_than_one_second(int64_t timeout)
return timeout != 0 && timeout < Time_Second;
}
-static int verify_health_check_parameter(const container_config *container_spec)
+static inline bool is_more_than_ten_minutes(int64_t timeout)
+{
+ return timeout > (10LL * Time_Minute);
+}
+
+static int verify_health_check_parameter(const container_config *container_spec, const char *runtime)
{
int ret = 0;
@@ -2168,6 +2173,13 @@ static int verify_health_check_parameter(const container_config *container_spec)
return ret;
}
+ if (strcasecmp(runtime, "kata-runtime") == 0) {
+ ERROR("kata-runtime does not support command line health check");
+ isulad_set_error_message("kata-runtime does not support command line health check");
+ ret = -1;
+ goto out;
+ }
+
if (is_less_than_one_second(container_spec->healthcheck->interval)) {
ERROR("Interval in Healthcheck cannot be less than one second");
isulad_set_error_message("Interval in Healthcheck cannot be less than one second");
@@ -2180,6 +2192,12 @@ static int verify_health_check_parameter(const container_config *container_spec)
ret = -1;
goto out;
}
+ if (is_more_than_ten_minutes(container_spec->healthcheck->timeout)) {
+ ERROR("Timeout in Healthcheck cannot be more than ten minutes");
+ isulad_set_error_message("Timeout in Healthcheck cannot be more than ten minutes");
+ ret = -1;
+ goto out;
+ }
if (is_less_than_one_second(container_spec->healthcheck->start_period)) {
ERROR("StartPeriod in Healthcheck cannot be less than one second");
isulad_set_error_message("StartPeriod in Healthcheck cannot be less than one second");
@@ -2219,11 +2237,11 @@ out:
return ret;
}
-int verify_container_config(const container_config *container_spec)
+int verify_container_config(const container_config *container_spec, const char *runtime)
{
int ret = 0;
- if (verify_health_check_parameter(container_spec) != 0) {
+ if (verify_health_check_parameter(container_spec, runtime) != 0) {
ret = -1;
goto out;
}
diff --git a/src/daemon/modules/spec/verify.h b/src/daemon/modules/spec/verify.h
index db54c7ae..21e8fba8 100644
--- a/src/daemon/modules/spec/verify.h
+++ b/src/daemon/modules/spec/verify.h
@@ -33,7 +33,7 @@ int verify_container_settings_start(const oci_runtime_spec *oci_spec);
int verify_host_config_settings(host_config *hostconfig, bool update);
-int verify_container_config(const container_config *container_spec);
+int verify_container_config(const container_config *container_spec, const char *runtime);
#ifdef __cplusplus
}
--
2.25.1

View File

@ -0,0 +1,89 @@
From 8afe42c7385761f1a4840836179023d0fd878ed6 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Wed, 9 Aug 2023 11:44:25 +1400
Subject: [PATCH 09/11] add testcases for restrictions on container health
checks
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
CI/test_cases/container_cases/health_check.sh | 52 +++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/CI/test_cases/container_cases/health_check.sh b/CI/test_cases/container_cases/health_check.sh
index 4971ff05..2800a5db 100755
--- a/CI/test_cases/container_cases/health_check.sh
+++ b/CI/test_cases/container_cases/health_check.sh
@@ -117,6 +117,54 @@ function test_health_check_paraments()
return ${ret}
}
+function test_health_check_invalid_runtime()
+{
+ local ret=0
+ local retry_limit=10
+ local retry_interval=1
+ local invalid_runtime="kata-runtime"
+ local test="test health check with invalid runtime => (${FUNCNAME[@]})"
+
+ msg_info "${test} starting..."
+
+ isula images | grep ${image}
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - missing list image: ${image}" && ((ret++))
+
+ # health check with invalid runtime
+ container_name="health_check_invalid_para"
+ isula run -itd --runtime ${invalid_runtime} -n ${container_name} --health-cmd 'date' --health-interval 5s ${image} /bin/sh 2>&1 | grep "not support command line health check"
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - run container with invalid runtime:${invalid_runtime} should be fail" && ((ret++))
+
+ isula rm -f ${container_name}
+
+ msg_info "${test} finished with return ${ret}..."
+ return ${ret}
+}
+
+function test_health_check_invalid_paraments()
+{
+ local ret=0
+ local retry_limit=10
+ local retry_interval=1
+ local test="test health check with invalid paraments => (${FUNCNAME[@]})"
+
+ msg_info "${test} starting..."
+
+ isula images | grep ${image}
+ [[ $? -ne 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - missing list image: ${image}" && ((ret++))
+
+ # health check with invalid timeout
+ container_name="health_check_invalid_para"
+ isula run -itd --runtime $1 -n ${container_name} --health-cmd 'echo "iSulad" ; exit 1' \
+ --health-interval 5s --health-timeout 11m --health-start-period 8s --health-exit-on-unhealthy ${image} /bin/sh
+ [[ $? -eq 0 ]] && msg_err "${FUNCNAME[0]}:${LINENO} - run container with invalid timeout should be fail" && ((ret++))
+
+ isula rm -f ${container_name}
+
+ msg_info "${test} finished with return ${ret}..."
+ return ${ret}
+}
+
function test_health_check_normally()
{
local ret=0
@@ -243,11 +291,15 @@ function test_health_check_monitor()
declare -i ans=0
+test_health_check_invalid_runtime || ((ans++))
+
for element in ${RUNTIME_LIST[@]};
do
test="health check test => (${element})"
msg_info "${test} starting..."
+ test_health_check_invalid_paraments $element || ((ans++))
+
test_health_check_paraments $element || ((ans++))
test_health_check_normally $element || ((ans++))
--
2.25.1

View File

@ -0,0 +1,35 @@
From 64fe739caf3fa473d3b503efb42eaa29774f46d6 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Wed, 9 Aug 2023 02:39:56 +0000
Subject: [PATCH 10/11] !2100 link libcap for storage_layers_ut and
storage_driver_ut * link libcap for storage_layers_ut and storage_driver_ut
---
test/image/oci/storage/layers/CMakeLists.txt | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/test/image/oci/storage/layers/CMakeLists.txt b/test/image/oci/storage/layers/CMakeLists.txt
index ae0ac9c3..330a8306 100644
--- a/test/image/oci/storage/layers/CMakeLists.txt
+++ b/test/image/oci/storage/layers/CMakeLists.txt
@@ -65,7 +65,7 @@ target_link_libraries(${DRIVER_EXE}
${CMAKE_THREAD_LIBS_INIT}
${ISULA_LIBUTILS_LIBRARY}
${LIBTAR_LIBRARY}
- -lwebsockets -lcrypto -lyajl -larchive ${SELINUX_LIBRARY} -ldevmapper -lz)
+ -lwebsockets -lcrypto -lyajl -larchive ${SELINUX_LIBRARY} -ldevmapper -lz -lcap)
add_test(NAME ${DRIVER_EXE} COMMAND ${DRIVER_EXE} --gtest_output=xml:${DRIVER_EXE}-Results.xml)
set_tests_properties(${DRIVER_EXE} PROPERTIES TIMEOUT 120)
@@ -143,7 +143,7 @@ target_link_libraries(${LAYER_EXE}
${CMAKE_THREAD_LIBS_INIT}
${ISULA_LIBUTILS_LIBRARY}
${LIBTAR_LIBRARY}
- -lwebsockets -lcrypto -lyajl -larchive ${SELINUX_LIBRARY} -ldevmapper -lz)
+ -lwebsockets -lcrypto -lyajl -larchive ${SELINUX_LIBRARY} -ldevmapper -lz -lcap)
add_test(NAME ${LAYER_EXE} COMMAND ${LAYER_EXE} --gtest_output=xml:${LAYER_EXE}-Results.xml)
set_tests_properties(${LAYER_EXE} PROPERTIES TIMEOUT 120)
--
2.25.1

View File

@ -0,0 +1,26 @@
From 7d6b24af366a46338c48901cdab555b4e844ac13 Mon Sep 17 00:00:00 2001
From: zhongtao <zhongtao17@huawei.com>
Date: Fri, 11 Aug 2023 10:21:26 +1400
Subject: [PATCH 11/11] modify the help information of health-timeout
Signed-off-by: zhongtao <zhongtao17@huawei.com>
---
src/cmd/isula/base/create.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/cmd/isula/base/create.h b/src/cmd/isula/base/create.h
index 4f63d92f..0239d608 100644
--- a/src/cmd/isula/base/create.h
+++ b/src/cmd/isula/base/create.h
@@ -414,7 +414,7 @@ extern "C" {
"health-timeout", \
0, \
&(cmdargs).custom_conf.health_timeout, \
- "Maximum time to allow one check to run (ms|s|m|h) (default 30s)", \
+ "Maximum time to allow one check to run (ms|s|m) (default 30s, maximum is 10 minutes)", \
command_convert_nanoseconds }, \
{ CMD_OPT_TYPE_CALLBACK, \
false, \
--
2.25.1

View File

@ -1,5 +1,5 @@
%global _version 2.0.18
%global _release 10
%global _release 11
%global is_systemd 1
%global enable_shimv2 1
%global is_embedded 1
@ -108,6 +108,17 @@ Patch0092: 0092-debug-improve-debug-message-for-gc-containers.patch
Patch0093: 0093-refactor-rt_isula_exec-and-shim-log.patch
Patch0094: 0094-add-some-exec-test.patch
Patch0095: 0095-2079-clean-network-reosurces-if-runpodsandbox-failed.patch
Patch0096: 0096-fix-stuck-health-check-blocking-container-stop-bugs.patch
Patch0097: 0097-add-health-check-abnormal-test_cases.patch
Patch0098: 0098-restore-health-check-fix.patch
Patch0099: 0099-remove-health-check-abnormal-test_cases.patch
Patch0100: 0100-bugfix-for-implicit_digest.patch
Patch0101: 0101-add-testcases-for-inspect-images-by-digest-after-res.patch
Patch0102: 0102-fix-loading-of-nsswitch-based-config-inside-chr.patch
Patch0103: 0103-Added-restrictions-on-container-health-checks.patch
Patch0104: 0104-add-testcases-for-restrictions-on-container-health-c.patch
Patch0105: 0105-link-libcap-for-storage_layers_ut-and-storage_d.patch
Patch0106: 0106-modify-the-help-information-of-health-timeout.patch
%ifarch x86_64 aarch64
Provides: libhttpclient.so()(64bit)
@ -352,6 +363,12 @@ fi
%endif
%changelog
* Tue Aug 15 2023 zhongtao <zhongtao17@huawei.com> - 2.0.18-11
- Type: bugfix
- ID: NA
- SUG: NA
- DESC: upgrade from upstream
* Wed Jul 19 2023 zhongtao <zhongtao17@huawei.com> - 2.0.18-10
- Type: bugfix
- ID: NA