fix CVE-2020-27781

Signed-off-by: chixinze <xmdxcxz@gmail.com>
(cherry picked from commit d6f3fb67431b24c824467959ead0f60b0305c7cd)
This commit is contained in:
chixinze 2021-07-18 17:25:28 +08:00 committed by openeuler-sync-bot
parent 30b8085a65
commit 8c4dedf82d
9 changed files with 1037 additions and 1 deletions

View File

@ -0,0 +1,154 @@
From c3f9c972297c4d73a901453e806c16044e570667 Mon Sep 17 00:00:00 2001
From: Rishabh Dave <rishabhddave@gmail.com>
Date: Thu, 7 Jun 2018 12:26:44 +0000
Subject: [PATCH 1/2] ceph-volume-client: allow atomic updates for RADOS
objects
put_object_versioned() takes the version of the object and verifies if
the version of the object is the expected one before updating the data
in the object. This verification of version before actually writing
makes put_object_versioned() atomic.
Rest of the changes include adding get_object_and_version() so that
current version of the object can be obtained and modification of
get_object() and put_object() to use get_object_and_version() and
put_object_versioned() respectively.
Fixes: http://tracker.ceph.com/issues/24173
Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit ca7253cff6cdac590bb14d0d297c02452bf75bf6)
---
src/pybind/ceph_volume_client.py | 46 +++++++++++++++++++++++++++++---
src/pybind/rados/rados.pyx | 14 ++++++++++
2 files changed, 57 insertions(+), 3 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index d38f72be9a3..c43681bef21 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -205,6 +205,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """
* 1 - Initial version
* 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient
* 3 - Allow volumes to be created without RADOS namespace isolation
+ * 4 - Added get_object_and_version, put_object_versioned method to CephFSVolumeClient
"""
@@ -228,7 +229,7 @@ class CephFSVolumeClient(object):
"""
# Current version
- version = 3
+ version = 4
# Where shall we create our volumes?
POOL_PREFIX = "fsvolume_"
@@ -1403,15 +1404,40 @@ class CephFSVolumeClient(object):
:param data: data to write
:type data: bytes
"""
+ return self.put_object_versioned(pool_name, object_name, data)
+
+ def put_object_versioned(self, pool_name, object_name, data, version=None):
+ """
+ Synchronously write data to an object only if the version of the
+ object matches the expected version.
+
+ :param pool_name: name of the pool
+ :type pool_name: str
+ :param object_name: name of the object
+ :type object_name: str
+ :param data: data to write
+ :type data: bytes
+ :param version: expected version of the object to write
+ :type version: int
+ """
ioctx = self.rados.open_ioctx(pool_name)
+
max_size = int(self.rados.conf_get('osd_max_write_size')) * 1024 * 1024
if len(data) > max_size:
msg = ("Data to be written to object '{0}' exceeds "
"{1} bytes".format(object_name, max_size))
log.error(msg)
raise CephFSVolumeClientError(msg)
+
try:
- ioctx.write_full(object_name, data)
+ with rados.WriteOpCtx(ioctx) as wop:
+ if version is not None:
+ wop.assert_version(version)
+ wop.write_full(data)
+ ioctx.operate_write_op(wop, object_name)
+ except rados.OSError as e:
+ log.error(e)
+ raise e
finally:
ioctx.close()
@@ -1426,6 +1452,19 @@ class CephFSVolumeClient(object):
:returns: bytes - data read from object
"""
+ return self.get_object_and_version(pool_name, object_name)[0]
+
+ def get_object_and_version(self, pool_name, object_name):
+ """
+ Synchronously read data from object and get its version.
+
+ :param pool_name: name of the pool
+ :type pool_name: str
+ :param object_name: name of the object
+ :type object_name: str
+
+ :returns: tuple of object data and version
+ """
ioctx = self.rados.open_ioctx(pool_name)
max_size = int(self.rados.conf_get('osd_max_write_size')) * 1024 * 1024
try:
@@ -1434,9 +1473,10 @@ class CephFSVolumeClient(object):
(ioctx.read(object_name, 1, offset=max_size))):
log.warning("Size of object {0} exceeds '{1}' bytes "
"read".format(object_name, max_size))
+ obj_version = ioctx.get_last_version()
finally:
ioctx.close()
- return bytes_read
+ return (bytes_read, obj_version)
def delete_object(self, pool_name, object_name):
ioctx = self.rados.open_ioctx(pool_name)
diff --git a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
index e9829937a11..c0df28645b8 100644
--- a/src/pybind/rados/rados.pyx
+++ b/src/pybind/rados/rados.pyx
@@ -284,6 +284,7 @@ cdef extern from "rados/librados.h" nogil:
void rados_write_op_create(rados_write_op_t write_op, int exclusive, const char *category)
void rados_write_op_append(rados_write_op_t write_op, const char *buffer, size_t len)
void rados_write_op_write_full(rados_write_op_t write_op, const char *buffer, size_t len)
+ void rados_write_op_assert_version(rados_write_op_t write_op, uint64_t ver)
void rados_write_op_write(rados_write_op_t write_op, const char *buffer, size_t len, uint64_t offset)
void rados_write_op_remove(rados_write_op_t write_op)
void rados_write_op_truncate(rados_write_op_t write_op, uint64_t offset)
@@ -1941,6 +1942,19 @@ cdef class WriteOp(object):
with nogil:
rados_write_op_write(self.write_op, _to_write, length, _offset)
+ @requires(('version', int))
+ def assert_version(self, version):
+ """
+ Check if object's version is the expected one.
+ :param version: expected version of the object
+ :type version: int
+ """
+ cdef:
+ uint64_t _version = version
+
+ with nogil:
+ rados_write_op_assert_version(self.write_op, _version)
+
@requires(('offset', int), ('length', int))
def zero(self, offset, length):
"""
--
2.23.0

View File

@ -0,0 +1,49 @@
From ee6625c29179dd5aa34b2da4d9af75e87f13316e Mon Sep 17 00:00:00 2001
From: Rishabh Dave <rishabhddave@gmail.com>
Date: Thu, 7 Jun 2018 12:29:36 +0000
Subject: [PATCH 2/2] qa/ceph-volume: add a test for put_object_versioned()
Test if the version passed to put_object_versioned() is used to
crosscheck.
Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit 8ab6f84d5799cf2f32fb2b08168ff1cfb82f7d15)
---
qa/tasks/cephfs/test_volume_client.py | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 9be7fc2fff5..cf135bce122 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -970,6 +970,27 @@ vc.disconnect()
obj_data = obj_data
)))
+ def test_put_object_versioned(self):
+ vc_mount = self.mounts[1]
+ vc_mount.umount_wait()
+ self._configure_vc_auth(vc_mount, "manila")
+
+ obj_data = 'test_data'
+ obj_name = 'test_vc_ob_2'
+ pool_name = self.fs.get_data_pool_names()[0]
+ self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data)
+
+ # Test if put_object_versioned() crosschecks the version of the
+ # given object. Being a negative test, an exception is expected.
+ with self.assertRaises(CommandFailedError):
+ self._volume_client_python(vc_mount, dedent("""
+ data, version = vc.get_object_and_version("{pool_name}", "{obj_name}")
+ data += 'm1'
+ vc.put_object("{pool_name}", "{obj_name}", data)
+ data += 'm2'
+ vc.put_object_versioned("{pool_name}", "{obj_name}", data, version)
+ """).format(pool_name=pool_name, obj_name=obj_name))
+
def test_delete_object(self):
vc_mount = self.mounts[1]
vc_mount.umount_wait()
--
2.23.0

View File

@ -0,0 +1,160 @@
From 7012f930e09275889857b9c800d087fb0c3e34a8 Mon Sep 17 00:00:00 2001
From: Rishabh Dave <ridave@redhat.com>
Date: Tue, 15 May 2018 06:06:39 +0000
Subject: [PATCH] qa: make test_volume_client.py py3 compatible
Signed-off-by: Rishabh Dave <ridave@redhat.com>
(cherry picked from commit f28274dc70aa102e3c4523059a65e5da8c8a0426)
---
qa/tasks/cephfs/test_volume_client.py | 35 +++++++++++++++------------
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 2e0bf6751e3..d94e2fa7b92 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -24,6 +24,7 @@ class TestVolumeClient(CephFSTestCase):
if ns_prefix:
ns_prefix = "\"" + ns_prefix + "\""
return client.run_python("""
+from __future__ import print_function
from ceph_volume_client import CephFSVolumeClient, VolumePath
import logging
log = logging.getLogger("ceph_volume_client")
@@ -101,7 +102,7 @@ vc.disconnect()
vp = VolumePath("{group_id}", "{volume_id}")
auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
tenant_id="{tenant_id}")
- print auth_result['auth_key']
+ print(auth_result['auth_key'])
""".format(
group_id=group_id,
volume_id=volume_id,
@@ -198,7 +199,7 @@ vc.disconnect()
mount_path = self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*{volume_size})
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id,
@@ -479,7 +480,7 @@ vc.disconnect()
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 10 * 1024 * 1024)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_ids[i]
@@ -562,7 +563,7 @@ vc.disconnect()
mount_path = self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", u"{volume_id}")
create_result = vc.create_volume(vp, 10)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id
@@ -612,7 +613,7 @@ vc.disconnect()
mount_path = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id,
@@ -667,14 +668,14 @@ vc.disconnect()
guest_entity_1 = "guest1"
guest_entity_2 = "guest2"
- log.info("print group ID: {0}".format(group_id))
+ log.info("print(group ID: {0})".format(group_id))
# Create a volume.
auths = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.create_volume(vp, 1024*1024*10)
auths = vc.get_authorized_ids(vp)
- print auths
+ print(auths)
""".format(
group_id=group_id,
volume_id=volume_id,
@@ -689,7 +690,7 @@ vc.disconnect()
vc.authorize(vp, "{guest_entity_1}", readonly=False)
vc.authorize(vp, "{guest_entity_2}", readonly=True)
auths = vc.get_authorized_ids(vp)
- print auths
+ print(auths)
""".format(
group_id=group_id,
volume_id=volume_id,
@@ -697,7 +698,11 @@ vc.disconnect()
guest_entity_2=guest_entity_2,
)))
# Check the list of authorized IDs and their access levels.
- expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
+ if self.py_version == 'python3':
+ expected_result = [('guest1', 'rw'), ('guest2', 'r')]
+ else:
+ expected_result = [(u'guest1', u'rw'), (u'guest2', u'r')]
+
self.assertItemsEqual(str(expected_result), auths)
# Disallow both the auth IDs' access to the volume.
@@ -706,7 +711,7 @@ vc.disconnect()
vc.deauthorize(vp, "{guest_entity_1}")
vc.deauthorize(vp, "{guest_entity_2}")
auths = vc.get_authorized_ids(vp)
- print auths
+ print(auths)
""".format(
group_id=group_id,
volume_id=volume_id,
@@ -783,11 +788,11 @@ vc.disconnect()
"version": 2,
"compat_version": 1,
"dirty": False,
- "tenant_id": u"tenant1",
+ "tenant_id": "tenant1",
"volumes": {
"groupid/volumeid": {
"dirty": False,
- "access_level": u"rw",
+ "access_level": "rw"
}
}
}
@@ -817,7 +822,7 @@ vc.disconnect()
"auths": {
"guest": {
"dirty": False,
- "access_level": u"rw"
+ "access_level": "rw"
}
}
}
@@ -1021,7 +1026,7 @@ vc.disconnect()
mount_path = self._volume_client_python(vc_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id
@@ -1060,7 +1065,7 @@ vc.disconnect()
mount_path = self._volume_client_python(vc_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
- print create_result['mount_path']
+ print(create_result['mount_path'])
""".format(
group_id=group_id,
volume_id=volume_id
--
2.23.0

View File

@ -0,0 +1,48 @@
From 7e45e2905f2f61bf9d100308df979f432754982b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C4=90=E1=BA=B7ng=20Minh=20D=C5=A9ng?= <dungdm93@live.com>
Date: Sun, 10 May 2020 11:37:23 +0700
Subject: [PATCH 1/5] pybind/ceph_volume_client: Fix PEP-8 SyntaxWarning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Signed-off-by: Đặng Minh Dũng <dungdm93@live.com>
(cherry picked from commit 3ce9a89a5a1a2d7fa3d57c597b781a6aece7cbb5)
---
src/pybind/ceph_volume_client.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 06380ef417f..215c74f5186 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -335,7 +335,7 @@ class CephFSVolumeClient(object):
continue
(group_id, volume_id) = volume.split('/')
- group_id = group_id if group_id is not 'None' else None
+ group_id = group_id if group_id != 'None' else None
volume_path = VolumePath(group_id, volume_id)
access_level = volume_data['access_level']
@@ -358,7 +358,7 @@ class CephFSVolumeClient(object):
if vol_meta['auths'][auth_id] == want_auth:
continue
- readonly = True if access_level is 'r' else False
+ readonly = access_level == 'r'
self._authorize_volume(volume_path, auth_id, readonly)
# Recovered from partial auth updates for the auth ID's access
@@ -1088,7 +1088,7 @@ class CephFSVolumeClient(object):
# Construct auth caps that if present might conflict with the desired
# auth caps.
- unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw'
+ unwanted_access_level = 'r' if want_access_level == 'rw' else 'rw'
unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
if namespace:
unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
--
2.23.0

172
0012-CVE-2020-27781-2.patch Normal file
View File

@ -0,0 +1,172 @@
From 1de5caf2da9b06aa4f363f9706c693213a6ee59f Mon Sep 17 00:00:00 2001
From: Ramana Raja <rraja@redhat.com>
Date: Wed, 25 Nov 2020 16:44:35 +0530
Subject: [PATCH 2/5] pybind/ceph_volume_client: Disallow authorize auth_id
This patch disallows the ceph_volume_client from authorizing an auth_id
which is not created by ceph_volume_client. Those auth_ids could be
created by other means for other use cases which should not be modified
by ceph_volume_client.
Fixes: https://tracker.ceph.com/issues/48555
Signed-off-by: Ramana Raja <rraja@redhat.com>
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 3a85d2d04028a323952a31d18cdbefb710be2e2b)
---
src/pybind/ceph_volume_client.py | 63 ++++++++++++++++++++------------
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 215c74f5186..a639fee7dea 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -213,6 +213,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """
* 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient
* 3 - Allow volumes to be created without RADOS namespace isolation
* 4 - Added get_object_and_version, put_object_versioned method to CephFSVolumeClient
+ * 5 - Disallow authorize API for users not created by CephFSVolumeClient
"""
@@ -236,7 +237,7 @@ class CephFSVolumeClient(object):
"""
# Current version
- version = 4
+ version = 5
# Where shall we create our volumes?
POOL_PREFIX = "fsvolume_"
@@ -359,7 +360,18 @@ class CephFSVolumeClient(object):
continue
readonly = access_level == 'r'
- self._authorize_volume(volume_path, auth_id, readonly)
+ client_entity = "client.{0}".format(auth_id)
+ try:
+ existing_caps = self._rados_command(
+ 'auth get',
+ {
+ 'entity': client_entity
+ }
+ )
+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
+ except rados.Error:
+ existing_caps = None
+ self._authorize_volume(volume_path, auth_id, readonly, existing_caps)
# Recovered from partial auth updates for the auth ID's access
# to a volume.
@@ -943,6 +955,18 @@ class CephFSVolumeClient(object):
"""
with self._auth_lock(auth_id):
+ client_entity = "client.{0}".format(auth_id)
+ try:
+ existing_caps = self._rados_command(
+ 'auth get',
+ {
+ 'entity': client_entity
+ }
+ )
+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
+ except rados.Error:
+ existing_caps = None
+
# Existing meta, or None, to be updated
auth_meta = self._auth_metadata_get(auth_id)
@@ -956,7 +980,14 @@ class CephFSVolumeClient(object):
'dirty': True,
}
}
+
if auth_meta is None:
+ if existing_caps is not None:
+ msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id)
+ log.error(msg)
+ raise CephFSVolumeClientError(msg)
+
+ # non-existent auth IDs
sys.stderr.write("Creating meta for ID {0} with tenant {1}\n".format(
auth_id, tenant_id
))
@@ -966,14 +997,6 @@ class CephFSVolumeClient(object):
'tenant_id': tenant_id.__str__() if tenant_id else None,
'volumes': volume
}
-
- # Note: this is *not* guaranteeing that the key doesn't already
- # exist in Ceph: we are allowing VolumeClient tenants to
- # 'claim' existing Ceph keys. In order to prevent VolumeClient
- # tenants from reading e.g. client.admin keys, you need to
- # have configured your VolumeClient user (e.g. Manila) to
- # have mon auth caps that prevent it from accessing those keys
- # (e.g. limit it to only access keys with a manila.* prefix)
else:
# Disallow tenants to share auth IDs
if auth_meta['tenant_id'].__str__() != tenant_id.__str__():
@@ -993,7 +1016,7 @@ class CephFSVolumeClient(object):
self._auth_metadata_set(auth_id, auth_meta)
with self._volume_lock(volume_path):
- key = self._authorize_volume(volume_path, auth_id, readonly)
+ key = self._authorize_volume(volume_path, auth_id, readonly, existing_caps)
auth_meta['dirty'] = False
auth_meta['volumes'][volume_path_str]['dirty'] = False
@@ -1010,7 +1033,7 @@ class CephFSVolumeClient(object):
'auth_key': None
}
- def _authorize_volume(self, volume_path, auth_id, readonly):
+ def _authorize_volume(self, volume_path, auth_id, readonly, existing_caps):
vol_meta = self._volume_metadata_get(volume_path)
access_level = 'r' if readonly else 'rw'
@@ -1029,14 +1052,14 @@ class CephFSVolumeClient(object):
vol_meta['auths'].update(auth)
self._volume_metadata_set(volume_path, vol_meta)
- key = self._authorize_ceph(volume_path, auth_id, readonly)
+ key = self._authorize_ceph(volume_path, auth_id, readonly, existing_caps)
vol_meta['auths'][auth_id]['dirty'] = False
self._volume_metadata_set(volume_path, vol_meta)
return key
- def _authorize_ceph(self, volume_path, auth_id, readonly):
+ def _authorize_ceph(self, volume_path, auth_id, readonly, existing_caps):
path = self._get_path(volume_path)
log.debug("Authorizing Ceph id '{0}' for path '{1}'".format(
auth_id, path
@@ -1064,15 +1087,7 @@ class CephFSVolumeClient(object):
want_osd_cap = 'allow {0} pool={1}'.format(want_access_level,
pool_name)
- try:
- existing = self._rados_command(
- 'auth get',
- {
- 'entity': client_entity
- }
- )
- # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
- except rados.Error:
+ if existing_caps is None:
caps = self._rados_command(
'auth get-or-create',
{
@@ -1084,7 +1099,7 @@ class CephFSVolumeClient(object):
})
else:
# entity exists, update it
- cap = existing[0]
+ cap = existing_caps[0]
# Construct auth caps that if present might conflict with the desired
# auth caps.
--
2.23.0

113
0013-CVE-2020-27781-3.patch Normal file
View File

@ -0,0 +1,113 @@
From eb2fa6934fc736f8abe6d9e237b0a14c9d877626 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Thu, 26 Nov 2020 14:48:16 +0530
Subject: [PATCH 3/5] pybind/ceph_volume_client: Preserve existing caps while
authorize/deauthorize auth-id
Authorize/Deauthorize used to overwrite the caps of auth-id which would
end up deleting existing caps. This patch fixes the same by retaining
the existing caps by appending or deleting the new caps as needed.
Fixes: https://tracker.ceph.com/issues/48555
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 47100e528ef77e7e82dc9877424243dc6a7e7533)
---
src/pybind/ceph_volume_client.py | 43 ++++++++++++++++++++++----------
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index a639fee7dea..33f6beabd18 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -941,6 +941,26 @@ class CephFSVolumeClient(object):
data['version'] = self.version
return self._metadata_set(self._volume_metadata_path(volume_path), data)
+ def _prepare_updated_caps_list(self, existing_caps, mds_cap_str, osd_cap_str, authorize=True):
+ caps_list = []
+ for k, v in existing_caps['caps'].items():
+ if k == 'mds' or k == 'osd':
+ continue
+ elif k == 'mon':
+ if not authorize and v == 'allow r':
+ continue
+ caps_list.extend((k,v))
+
+ if mds_cap_str:
+ caps_list.extend(('mds', mds_cap_str))
+ if osd_cap_str:
+ caps_list.extend(('osd', osd_cap_str))
+
+ if authorize and 'mon' not in caps_list:
+ caps_list.extend(('mon', 'allow r'))
+
+ return caps_list
+
def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None):
"""
Get-or-create a Ceph auth identity for `auth_id` and grant them access
@@ -1119,8 +1139,8 @@ class CephFSVolumeClient(object):
if not orig_mds_caps:
return want_mds_cap, want_osd_cap
- mds_cap_tokens = orig_mds_caps.split(",")
- osd_cap_tokens = orig_osd_caps.split(",")
+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
if want_mds_cap in mds_cap_tokens:
return orig_mds_caps, orig_osd_caps
@@ -1141,15 +1161,14 @@ class CephFSVolumeClient(object):
orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap,
unwanted_mds_cap, unwanted_osd_cap)
+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str)
caps = self._rados_command(
'auth caps',
{
'entity': client_entity,
- 'caps': [
- 'mds', mds_cap_str,
- 'osd', osd_cap_str,
- 'mon', cap['caps'].get('mon', 'allow r')]
+ 'caps': caps_list
})
+
caps = self._rados_command(
'auth get',
{
@@ -1274,8 +1293,8 @@ class CephFSVolumeClient(object):
)
def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):
- mds_cap_tokens = orig_mds_caps.split(",")
- osd_cap_tokens = orig_osd_caps.split(",")
+ mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
+ osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):
if want_mds_cap in mds_cap_tokens:
@@ -1291,17 +1310,15 @@ class CephFSVolumeClient(object):
mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,
want_mds_caps, want_osd_caps)
- if not mds_cap_str:
+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str, authorize=False)
+ if not caps_list:
self._rados_command('auth del', {'entity': client_entity}, decode=False)
else:
self._rados_command(
'auth caps',
{
'entity': client_entity,
- 'caps': [
- 'mds', mds_cap_str,
- 'osd', osd_cap_str,
- 'mon', cap['caps'].get('mon', 'allow r')]
+ 'caps': caps_list
})
# FIXME: rados raising Error instead of ObjectNotFound in auth get failure
--
2.23.0

View File

@ -0,0 +1,52 @@
From ae1889014e5becb774b69ca52ed7465a33873a3f Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Sun, 6 Dec 2020 12:40:20 +0530
Subject: [PATCH 4/5] pybind/ceph_volume_client: Optionally authorize existing
auth-ids
Optionally allow authorizing auth-ids not created by ceph_volume_client
via the option 'allow_existing_id'. This can help existing deployers
of manila to disallow/allow authorization of pre-created auth IDs
via a manila driver config that sets 'allow_existing_id' to False/True.
Fixes: https://tracker.ceph.com/issues/48555
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit 77b42496e25cbd4af2e80a064ddf26221b53733f)
---
src/pybind/ceph_volume_client.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 33f6beabd18..7f48a466079 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -961,7 +961,7 @@ class CephFSVolumeClient(object):
return caps_list
- def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None):
+ def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None, allow_existing_id=False):
"""
Get-or-create a Ceph auth identity for `auth_id` and grant them access
to
@@ -971,6 +971,8 @@ class CephFSVolumeClient(object):
:param tenant_id: Optionally provide a stringizable object to
restrict any created cephx IDs to other callers
passing the same tenant ID.
+ :param allow_existing_id: Optionally authorize existing auth-ids not
+ created by ceph_volume_client
:return:
"""
@@ -1002,7 +1004,7 @@ class CephFSVolumeClient(object):
}
if auth_meta is None:
- if existing_caps is not None:
+ if not allow_existing_id and existing_caps is not None:
msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id)
log.error(msg)
raise CephFSVolumeClientError(msg)
--
2.23.0

275
0015-CVE-2020-27781-5.patch Normal file
View File

@ -0,0 +1,275 @@
From a036cf3cbf47bbc8fd7793a80767c1257ed426d1 Mon Sep 17 00:00:00 2001
From: Kotresh HR <khiremat@redhat.com>
Date: Tue, 1 Dec 2020 16:14:17 +0530
Subject: [PATCH 5/5] tasks/cephfs/test_volume_client: Add tests for
authorize/deauthorize
1. Add testcase for authorizing auth_id which is not added by
ceph_volume_client
2. Add testcase to test 'allow_existing_id' option
3. Add testcase for deauthorizing auth_id which has got it's caps
updated out of band
Signed-off-by: Kotresh HR <khiremat@redhat.com>
(cherry picked from commit aa4beb3d993649a696af95cf27150cc460baaf70)
Conflicts:
qa/tasks/cephfs/test_volume_client.py
---
qa/tasks/cephfs/test_volume_client.py | 213 +++++++++++++++++++++++++-
1 file changed, 209 insertions(+), 4 deletions(-)
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 06094dd6fe9..bdb4ad022d8 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -81,7 +81,7 @@ vc.disconnect()
def _configure_guest_auth(self, volumeclient_mount, guest_mount,
guest_entity, mount_path,
namespace_prefix=None, readonly=False,
- tenant_id=None):
+ tenant_id=None, allow_existing_id=False):
"""
Set up auth credentials for the guest client to mount a volume.
@@ -106,14 +106,16 @@ vc.disconnect()
key = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
- tenant_id="{tenant_id}")
+ tenant_id="{tenant_id}",
+ allow_existing_id="{allow_existing_id}")
print(auth_result['auth_key'])
""".format(
group_id=group_id,
volume_id=volume_id,
guest_entity=guest_entity,
readonly=readonly,
- tenant_id=tenant_id)), volume_prefix, namespace_prefix
+ tenant_id=tenant_id,
+ allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix
)
# CephFSVolumeClient's authorize() does not return the secret
@@ -886,6 +888,209 @@ vc.disconnect()
)))
self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
+ def test_authorize_auth_id_not_created_by_ceph_volume_client(self):
+ """
+ If the auth_id already exists and is not created by
+ ceph_volume_client, it's not allowed to authorize
+ the auth-id by default.
+ """
+ volumeclient_mount = self.mounts[1]
+ volumeclient_mount.umount_wait()
+
+ # Configure volumeclient_mount as the handle for driving volumeclient.
+ self._configure_vc_auth(volumeclient_mount, "manila")
+
+ group_id = "groupid"
+ volume_id = "volumeid"
+
+ # Create auth_id
+ out = self.fs.mon_manager.raw_cluster_cmd(
+ "auth", "get-or-create", "client.guest1",
+ "mds", "allow *",
+ "osd", "allow rw",
+ "mon", "allow *"
+ )
+
+ auth_id = "guest1"
+ guestclient_1 = {
+ "auth_id": auth_id,
+ "tenant_id": "tenant1",
+ }
+
+ # Create a volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.create_volume(vp, 1024*1024*10)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ # Cannot authorize 'guestclient_1' to access the volume.
+ # It uses auth ID 'guest1', which already exists and not
+ # created by ceph_volume_client
+ with self.assertRaises(CommandFailedError):
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ auth_id=guestclient_1["auth_id"],
+ tenant_id=guestclient_1["tenant_id"]
+ )))
+
+ # Delete volume
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.delete_volume(vp)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ def test_authorize_allow_existing_id_option(self):
+ """
+ If the auth_id already exists and is not created by
+ ceph_volume_client, it's not allowed to authorize
+ the auth-id by default but is allowed with option
+ allow_existing_id.
+ """
+ volumeclient_mount = self.mounts[1]
+ volumeclient_mount.umount_wait()
+
+ # Configure volumeclient_mount as the handle for driving volumeclient.
+ self._configure_vc_auth(volumeclient_mount, "manila")
+
+ group_id = "groupid"
+ volume_id = "volumeid"
+
+ # Create auth_id
+ out = self.fs.mon_manager.raw_cluster_cmd(
+ "auth", "get-or-create", "client.guest1",
+ "mds", "allow *",
+ "osd", "allow rw",
+ "mon", "allow *"
+ )
+
+ auth_id = "guest1"
+ guestclient_1 = {
+ "auth_id": auth_id,
+ "tenant_id": "tenant1",
+ }
+
+ # Create a volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.create_volume(vp, 1024*1024*10)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ # Cannot authorize 'guestclient_1' to access the volume
+ # by default, which already exists and not created by
+ # ceph_volume_client but is allowed with option 'allow_existing_id'.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}",
+ allow_existing_id="{allow_existing_id}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ auth_id=guestclient_1["auth_id"],
+ tenant_id=guestclient_1["tenant_id"],
+ allow_existing_id=True
+ )))
+
+ # Delete volume
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.delete_volume(vp)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ def test_deauthorize_auth_id_after_out_of_band_update(self):
+ """
+ If the auth_id authorized by ceph_volume_client is updated
+ out of band, the auth_id should not be deleted after a
deauthorize. It should only remove caps associated with it.
+ """
+ volumeclient_mount = self.mounts[1]
+ volumeclient_mount.umount_wait()
+
+ # Configure volumeclient_mount as the handle for driving volumeclient.
+ self._configure_vc_auth(volumeclient_mount, "manila")
+
+ group_id = "groupid"
+ volume_id = "volumeid"
+
+
+ auth_id = "guest1"
+ guestclient_1 = {
+ "auth_id": auth_id,
+ "tenant_id": "tenant1",
+ }
+
+ # Create a volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.create_volume(vp, 1024*1024*10)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
+ # Authorize 'guestclient_1' to access the volume.
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ auth_id=guestclient_1["auth_id"],
+ tenant_id=guestclient_1["tenant_id"]
+ )))
+
+ # Update caps for guestclient_1 out of band
+ out = self.fs.mon_manager.raw_cluster_cmd(
+ "auth", "caps", "client.guest1",
+ "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid",
+ "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid",
+ "mon", "allow r",
+ "mgr", "allow *"
+ )
+
+ # Deauthorize guestclient_1
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.deauthorize(vp, "{guest_entity}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ guest_entity=guestclient_1["auth_id"]
+ )))
+
+ # Validate the caps of guestclient_1 after deauthorize. It should not have deleted
+ # guestclient_1. The mgr and mds caps should be present which was updated out of band.
+ out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
+
+ self.assertEqual("client.guest1", out[0]["entity"])
+ self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"])
+ self.assertEqual("allow *", out[0]["caps"]["mgr"])
+ self.assertNotIn("osd", out[0]["caps"])
+
+ # Delete volume
+ self._volume_client_python(volumeclient_mount, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.delete_volume(vp)
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ )))
+
def test_recover_metadata(self):
"""
That volume client can recover from partial auth updates using
@@ -1067,7 +1272,7 @@ vc.disconnect()
guest_mount.umount_wait()
# Set auth caps for the auth ID using the volumeclient
- self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)
+ self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path, allow_existing_id=True)
# Mount the volume in the guest using the auth ID to assert that the
# auth caps are valid
--
2.23.0

View File

@ -68,7 +68,7 @@
#################################################################################
Name: ceph
Version: 12.2.8
Release: 12
Release: 13
Epoch: 2
# define _epoch_prefix macro which will expand to the empty string if epoch is
@ -91,6 +91,14 @@ Patch4: 0004-CVE-2018-14662.patch
Patch5: 0005-CVE-2020-12059.patch
Patch6: 0006-CVE-2020-25678-1.patch
Patch7: 0007-CVE-2020-25678-2.patch
Patch8: 0008-ceph-volume-client-allow-atomic-updates-for-RADOS-ob.patch
Patch9: 0009-qa-ceph-volume-add-a-test-for-put_object_versioned.patch
Patch10: 0010-qa-make-test_volume_client.py-py3-compatible.patch
Patch11: 0011-CVE-2020-27781-1.patch
Patch12: 0012-CVE-2020-27781-2.patch
Patch13: 0013-CVE-2020-27781-3.patch
Patch14: 0014-CVE-2020-27781-4.patch
Patch15: 0015-CVE-2020-27781-5.patch
%if 0%{?suse_version}
%if 0%{?is_opensuse}
@ -1799,6 +1807,11 @@ exit 0
%changelog
* Sun Jul 18 2021 chixinze <xmdxcxz@gmail.com> - 1:12.2.8-13
- fix CVE-2020-27781
- ceph-volume-client: allow atomic updates for RADOS objects
- qa: make test_volume_client.py py3 compatible
* Wed Mar 10 2021 Zhuohui Zou <zhuohui@xsky.com> - 1:12.2.8-12
- fix CVE-2020-25678