forked from vitalif/vitastor
Compare commits
2 Commits
master
...
msgr-iothr
Author | SHA1 | Date |
---|---|---|
Vitaliy Filippov | 118a1cd521 | |
Vitaliy Filippov | 6da49d38fe |
|
@ -11,8 +11,7 @@ To enable Vitastor support in an OpenStack installation:
|
||||||
- Install vitastor-client, patched QEMU and libvirt packages from Vitastor DEB or RPM repository
|
- Install vitastor-client, patched QEMU and libvirt packages from Vitastor DEB or RPM repository
|
||||||
- Use `patches/nova-21.diff` or `patches/nova-23.diff` to patch your Nova installation.
|
- Use `patches/nova-21.diff` or `patches/nova-23.diff` to patch your Nova installation.
|
||||||
Patch 21 fits Nova 21-22, patch 23 fits Nova 23-24.
|
Patch 21 fits Nova 21-22, patch 23 fits Nova 23-24.
|
||||||
- Install `patches/cinder-vitastor-21.py` or `patches/cinder-vitastor-22.py` as `..../cinder/volume/drivers/vitastor.py`
|
- Install `patches/cinder-vitastor.py` as `..../cinder/volume/drivers/vitastor.py`
|
||||||
Patch 21 fits Cinder up to version 21 (Zed), patch 22 fits Cinder 22 (2023.1) and later.
|
|
||||||
- Define a volume type in cinder.conf (see below)
|
- Define a volume type in cinder.conf (see below)
|
||||||
- Block network access from VMs to Vitastor network (to OSDs and etcd),
|
- Block network access from VMs to Vitastor network (to OSDs and etcd),
|
||||||
because Vitastor doesn't support authentication
|
because Vitastor doesn't support authentication
|
||||||
|
|
|
@ -11,8 +11,7 @@
|
||||||
- Установите пакеты vitastor-client, libvirt и QEMU из DEB или RPM репозитория Vitastor
|
- Установите пакеты vitastor-client, libvirt и QEMU из DEB или RPM репозитория Vitastor
|
||||||
- Примените патч `patches/nova-21.diff` или `patches/nova-23.diff` к вашей инсталляции Nova.
|
- Примените патч `patches/nova-21.diff` или `patches/nova-23.diff` к вашей инсталляции Nova.
|
||||||
nova-21.diff подходит для Nova 21-22, nova-23.diff подходит для Nova 23-24.
|
nova-21.diff подходит для Nova 21-22, nova-23.diff подходит для Nova 23-24.
|
||||||
- Скопируйте `patches/cinder-vitastor-21.py` или `patches/cinder-vitastor-22.py` в инсталляцию Cinder как `cinder/volume/drivers/vitastor.py`.
|
- Скопируйте `patches/cinder-vitastor.py` в инсталляцию Cinder как `cinder/volume/drivers/vitastor.py`
|
||||||
`cinder-vitastor-21.py` подходит для Cinder 21 (zed) и младше, `cinder-vitastor-22.py` подходит для Cinder 22 (2023.1) и старше.
|
|
||||||
- Создайте тип томов в cinder.conf (см. ниже)
|
- Создайте тип томов в cinder.conf (см. ниже)
|
||||||
- Обязательно заблокируйте доступ от виртуальных машин к сети Vitastor (OSD и etcd), т.к. Vitastor (пока) не поддерживает аутентификацию
|
- Обязательно заблокируйте доступ от виртуальных машин к сети Vitastor (OSD и etcd), т.к. Vitastor (пока) не поддерживает аутентификацию
|
||||||
- Перезапустите Cinder и Nova
|
- Перезапустите Cinder и Nova
|
||||||
|
|
|
@ -1,966 +0,0 @@
|
||||||
# Vitastor Driver for OpenStack Cinder
|
|
||||||
#
|
|
||||||
# --------------------------------------------
|
|
||||||
# Install as cinder/volume/drivers/vitastor.py
|
|
||||||
# --------------------------------------------
|
|
||||||
#
|
|
||||||
# Copyright 2020 Vitaliy Filippov
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
"""Cinder Vitastor Driver"""
|
|
||||||
|
|
||||||
import binascii
|
|
||||||
import base64
|
|
||||||
import errno
|
|
||||||
import json
|
|
||||||
import math
|
|
||||||
import os
|
|
||||||
import tempfile
|
|
||||||
|
|
||||||
from castellan import key_manager
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log as logging
|
|
||||||
from oslo_service import loopingcall
|
|
||||||
from oslo_concurrency import processutils
|
|
||||||
from oslo_utils import encodeutils
|
|
||||||
from oslo_utils import excutils
|
|
||||||
from oslo_utils import fileutils
|
|
||||||
from oslo_utils import units
|
|
||||||
import six
|
|
||||||
from six.moves.urllib import request
|
|
||||||
|
|
||||||
from cinder import exception
|
|
||||||
from cinder.i18n import _
|
|
||||||
from cinder.image import image_utils
|
|
||||||
from cinder import interface
|
|
||||||
from cinder import objects
|
|
||||||
from cinder.objects import fields
|
|
||||||
from cinder import utils
|
|
||||||
from cinder.volume import configuration
|
|
||||||
from cinder.volume import driver
|
|
||||||
from cinder.volume import volume_utils
|
|
||||||
|
|
||||||
VERSION = '1.4.1'
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
VITASTOR_OPTS = [
|
|
||||||
cfg.StrOpt(
|
|
||||||
'vitastor_config_path',
|
|
||||||
default='/etc/vitastor/vitastor.conf',
|
|
||||||
help='Vitastor configuration file path'
|
|
||||||
),
|
|
||||||
cfg.StrOpt(
|
|
||||||
'vitastor_etcd_address',
|
|
||||||
default='',
|
|
||||||
help='Vitastor etcd address(es)'),
|
|
||||||
cfg.StrOpt(
|
|
||||||
'vitastor_etcd_prefix',
|
|
||||||
default='/vitastor',
|
|
||||||
help='Vitastor etcd prefix'
|
|
||||||
),
|
|
||||||
cfg.StrOpt(
|
|
||||||
'vitastor_pool_id',
|
|
||||||
default='',
|
|
||||||
help='Vitastor pool ID to use for volumes'
|
|
||||||
),
|
|
||||||
# FIXME exclusive_cinder_pool ?
|
|
||||||
]
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
CONF.register_opts(VITASTOR_OPTS, group = configuration.SHARED_CONF_GROUP)
|
|
||||||
|
|
||||||
class VitastorDriverException(exception.VolumeDriverException):
|
|
||||||
message = _("Vitastor Cinder driver failure: %(reason)s")
|
|
||||||
|
|
||||||
@interface.volumedriver
|
|
||||||
class VitastorDriver(driver.CloneableImageVD,
|
|
||||||
driver.ManageableVD, driver.ManageableSnapshotsVD,
|
|
||||||
driver.BaseVD):
|
|
||||||
"""Implements Vitastor volume commands."""
|
|
||||||
|
|
||||||
cfg = {}
|
|
||||||
_etcd_urls = []
|
|
||||||
|
|
||||||
def __init__(self, active_backend_id = None, *args, **kwargs):
|
|
||||||
super(VitastorDriver, self).__init__(*args, **kwargs)
|
|
||||||
self.configuration.append_config_values(VITASTOR_OPTS)
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_driver_options(cls):
|
|
||||||
additional_opts = cls._get_oslo_driver_opts(
|
|
||||||
'reserved_percentage',
|
|
||||||
'max_over_subscription_ratio',
|
|
||||||
'volume_dd_blocksize'
|
|
||||||
)
|
|
||||||
return VITASTOR_OPTS + additional_opts
|
|
||||||
|
|
||||||
def do_setup(self, context):
|
|
||||||
"""Performs initialization steps that could raise exceptions."""
|
|
||||||
super(VitastorDriver, self).do_setup(context)
|
|
||||||
# Make sure configuration is in UTF-8
|
|
||||||
for attr in [ 'config_path', 'etcd_address', 'etcd_prefix', 'pool_id' ]:
|
|
||||||
val = self.configuration.safe_get('vitastor_'+attr)
|
|
||||||
if val is not None:
|
|
||||||
self.cfg[attr] = utils.convert_str(val)
|
|
||||||
self.cfg = self._load_config(self.cfg)
|
|
||||||
|
|
||||||
def _load_config(self, cfg):
|
|
||||||
# Try to load configuration file
|
|
||||||
try:
|
|
||||||
f = open(cfg['config_path'] or '/etc/vitastor/vitastor.conf')
|
|
||||||
conf = json.loads(f.read())
|
|
||||||
f.close()
|
|
||||||
for k in conf:
|
|
||||||
cfg[k] = cfg.get(k, conf[k])
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
if isinstance(cfg['etcd_address'], str):
|
|
||||||
cfg['etcd_address'] = cfg['etcd_address'].split(',')
|
|
||||||
# Sanitize etcd URLs
|
|
||||||
for i, etcd_url in enumerate(cfg['etcd_address']):
|
|
||||||
ssl = False
|
|
||||||
if etcd_url.lower().startswith('http://'):
|
|
||||||
etcd_url = etcd_url[7:]
|
|
||||||
elif etcd_url.lower().startswith('https://'):
|
|
||||||
etcd_url = etcd_url[8:]
|
|
||||||
ssl = True
|
|
||||||
if etcd_url.find('/') < 0:
|
|
||||||
etcd_url += '/v3'
|
|
||||||
if ssl:
|
|
||||||
etcd_url = 'https://'+etcd_url
|
|
||||||
else:
|
|
||||||
etcd_url = 'http://'+etcd_url
|
|
||||||
cfg['etcd_address'][i] = etcd_url
|
|
||||||
return cfg
|
|
||||||
|
|
||||||
def check_for_setup_error(self):
|
|
||||||
"""Returns an error if prerequisites aren't met."""
|
|
||||||
|
|
||||||
def _encode_etcd_key(self, key):
|
|
||||||
if not isinstance(key, bytes):
|
|
||||||
key = str(key).encode('utf-8')
|
|
||||||
return base64.b64encode(self.cfg['etcd_prefix'].encode('utf-8')+b'/'+key).decode('utf-8')
|
|
||||||
|
|
||||||
def _encode_etcd_value(self, value):
|
|
||||||
if not isinstance(value, bytes):
|
|
||||||
value = str(value).encode('utf-8')
|
|
||||||
return base64.b64encode(value).decode('utf-8')
|
|
||||||
|
|
||||||
def _encode_etcd_requests(self, obj):
|
|
||||||
for v in obj:
|
|
||||||
for rt in v:
|
|
||||||
if 'key' in v[rt]:
|
|
||||||
v[rt]['key'] = self._encode_etcd_key(v[rt]['key'])
|
|
||||||
if 'range_end' in v[rt]:
|
|
||||||
v[rt]['range_end'] = self._encode_etcd_key(v[rt]['range_end'])
|
|
||||||
if 'value' in v[rt]:
|
|
||||||
v[rt]['value'] = self._encode_etcd_value(v[rt]['value'])
|
|
||||||
|
|
||||||
def _etcd_txn(self, params):
|
|
||||||
if 'compare' in params:
|
|
||||||
for v in params['compare']:
|
|
||||||
if 'key' in v:
|
|
||||||
v['key'] = self._encode_etcd_key(v['key'])
|
|
||||||
if 'failure' in params:
|
|
||||||
self._encode_etcd_requests(params['failure'])
|
|
||||||
if 'success' in params:
|
|
||||||
self._encode_etcd_requests(params['success'])
|
|
||||||
body = json.dumps(params).encode('utf-8')
|
|
||||||
headers = {
|
|
||||||
'Content-Type': 'application/json'
|
|
||||||
}
|
|
||||||
err = None
|
|
||||||
for etcd_url in self.cfg['etcd_address']:
|
|
||||||
try:
|
|
||||||
resp = request.urlopen(request.Request(etcd_url+'/kv/txn', body, headers), timeout = 5)
|
|
||||||
data = json.loads(resp.read())
|
|
||||||
if 'responses' not in data:
|
|
||||||
data['responses'] = []
|
|
||||||
for i, resp in enumerate(data['responses']):
|
|
||||||
if 'response_range' in resp:
|
|
||||||
if 'kvs' not in resp['response_range']:
|
|
||||||
resp['response_range']['kvs'] = []
|
|
||||||
for kv in resp['response_range']['kvs']:
|
|
||||||
kv['key'] = base64.b64decode(kv['key'].encode('utf-8')).decode('utf-8')
|
|
||||||
if kv['key'].startswith(self.cfg['etcd_prefix']+'/'):
|
|
||||||
kv['key'] = kv['key'][len(self.cfg['etcd_prefix'])+1 : ]
|
|
||||||
kv['value'] = json.loads(base64.b64decode(kv['value'].encode('utf-8')))
|
|
||||||
if len(resp.keys()) != 1:
|
|
||||||
LOG.exception('unknown responses['+str(i)+'] format: '+json.dumps(resp))
|
|
||||||
else:
|
|
||||||
resp = data['responses'][i] = resp[list(resp.keys())[0]]
|
|
||||||
return data
|
|
||||||
except Exception as e:
|
|
||||||
LOG.exception('error calling etcd transaction: '+body.decode('utf-8')+'\nerror: '+str(e))
|
|
||||||
err = e
|
|
||||||
raise err
|
|
||||||
|
|
||||||
def _etcd_foreach(self, prefix, add_fn):
|
|
||||||
total = 0
|
|
||||||
batch = 1000
|
|
||||||
begin = prefix+'/'
|
|
||||||
while True:
|
|
||||||
resp = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': {
|
|
||||||
'key': begin,
|
|
||||||
'range_end': prefix+'0',
|
|
||||||
'limit': batch+1,
|
|
||||||
} },
|
|
||||||
] })
|
|
||||||
i = 0
|
|
||||||
while i < batch and i < len(resp['responses'][0]['kvs']):
|
|
||||||
kv = resp['responses'][0]['kvs'][i]
|
|
||||||
add_fn(kv)
|
|
||||||
i += 1
|
|
||||||
if len(resp['responses'][0]['kvs']) <= batch:
|
|
||||||
break
|
|
||||||
begin = resp['responses'][0]['kvs'][batch]['key']
|
|
||||||
return total
|
|
||||||
|
|
||||||
def _update_volume_stats(self):
|
|
||||||
location_info = json.dumps({
|
|
||||||
'config': self.configuration.vitastor_config_path,
|
|
||||||
'etcd_address': self.configuration.vitastor_etcd_address,
|
|
||||||
'etcd_prefix': self.configuration.vitastor_etcd_prefix,
|
|
||||||
'pool_id': self.configuration.vitastor_pool_id,
|
|
||||||
})
|
|
||||||
|
|
||||||
stats = {
|
|
||||||
'vendor_name': 'Vitastor',
|
|
||||||
'driver_version': self.VERSION,
|
|
||||||
'storage_protocol': 'vitastor',
|
|
||||||
'total_capacity_gb': 'unknown',
|
|
||||||
'free_capacity_gb': 'unknown',
|
|
||||||
# FIXME check if safe_get is required
|
|
||||||
'reserved_percentage': self.configuration.safe_get('reserved_percentage'),
|
|
||||||
'multiattach': True,
|
|
||||||
'thin_provisioning_support': True,
|
|
||||||
'max_over_subscription_ratio': self.configuration.safe_get('max_over_subscription_ratio'),
|
|
||||||
'location_info': location_info,
|
|
||||||
'backend_state': 'down',
|
|
||||||
'volume_backend_name': self.configuration.safe_get('volume_backend_name') or 'vitastor',
|
|
||||||
'replication_enabled': False,
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
|
||||||
pool_stats = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': { 'key': 'pool/stats/'+str(self.cfg['pool_id']) } }
|
|
||||||
] })
|
|
||||||
total_provisioned = 0
|
|
||||||
def add_total(kv):
|
|
||||||
nonlocal total_provisioned
|
|
||||||
if kv['key'].find('@') >= 0:
|
|
||||||
total_provisioned += kv['value']['size']
|
|
||||||
self._etcd_foreach('config/inode/'+str(self.cfg['pool_id']), lambda kv: add_total(kv))
|
|
||||||
stats['provisioned_capacity_gb'] = round(total_provisioned/1024.0/1024.0/1024.0, 2)
|
|
||||||
pool_stats = pool_stats['responses'][0]['kvs']
|
|
||||||
if len(pool_stats):
|
|
||||||
pool_stats = pool_stats[0]['value']
|
|
||||||
stats['free_capacity_gb'] = round(1024.0*(pool_stats['total_raw_tb']-pool_stats['used_raw_tb'])/pool_stats['raw_to_usable'], 2)
|
|
||||||
stats['total_capacity_gb'] = round(1024.0*pool_stats['total_raw_tb'], 2)
|
|
||||||
stats['backend_state'] = 'up'
|
|
||||||
except Exception as e:
|
|
||||||
# just log and return unknown capacities
|
|
||||||
LOG.exception('error getting vitastor pool stats: '+str(e))
|
|
||||||
|
|
||||||
self._stats = stats
|
|
||||||
|
|
||||||
def get_volume_stats(self, refresh=False):
|
|
||||||
"""Get volume stats.
|
|
||||||
If 'refresh' is True, run update the stats first.
|
|
||||||
"""
|
|
||||||
if not self._stats or refresh:
|
|
||||||
self._update_volume_stats()
|
|
||||||
|
|
||||||
return self._stats
|
|
||||||
|
|
||||||
def _next_id(self, resp):
|
|
||||||
if len(resp['kvs']) == 0:
|
|
||||||
return (1, 0)
|
|
||||||
else:
|
|
||||||
return (1 + resp['kvs'][0]['value'], resp['kvs'][0]['mod_revision'])
|
|
||||||
|
|
||||||
def create_volume(self, volume):
|
|
||||||
"""Creates a logical volume."""
|
|
||||||
|
|
||||||
size = int(volume.size) * units.Gi
|
|
||||||
# FIXME: Check if convert_str is really required
|
|
||||||
vol_name = utils.convert_str(volume.name)
|
|
||||||
if vol_name.find('@') >= 0 or vol_name.find('/') >= 0:
|
|
||||||
raise exception.VolumeBackendAPIException(data = '@ and / are forbidden in volume and snapshot names')
|
|
||||||
|
|
||||||
LOG.debug("creating volume '%s'", vol_name)
|
|
||||||
|
|
||||||
self._create_image(vol_name, { 'size': size })
|
|
||||||
|
|
||||||
if volume.encryption_key_id:
|
|
||||||
self._create_encrypted_volume(volume, volume.obj_context)
|
|
||||||
|
|
||||||
volume_update = {}
|
|
||||||
return volume_update
|
|
||||||
|
|
||||||
def _create_encrypted_volume(self, volume, context):
|
|
||||||
"""Create a new LUKS encrypted image directly in Vitastor."""
|
|
||||||
vol_name = utils.convert_str(volume.name)
|
|
||||||
f, opts = self._encrypt_opts(volume, context)
|
|
||||||
# FIXME: Check if it works at all :-)
|
|
||||||
self._execute(
|
|
||||||
'qemu-img', 'convert', '-f', 'luks', *opts,
|
|
||||||
'vitastor:image='+vol_name.replace(':', '\\:')+self._qemu_args(),
|
|
||||||
'%sM' % (volume.size * 1024)
|
|
||||||
)
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
def _encrypt_opts(self, volume, context):
|
|
||||||
encryption = volume_utils.check_encryption_provider(self.db, volume, context)
|
|
||||||
# Fetch the key associated with the volume and decode the passphrase
|
|
||||||
keymgr = key_manager.API(CONF)
|
|
||||||
key = keymgr.get(context, encryption['encryption_key_id'])
|
|
||||||
passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8')
|
|
||||||
# Decode the dm-crypt style cipher spec into something qemu-img can use
|
|
||||||
cipher_spec = image_utils.decode_cipher(encryption['cipher'], encryption['key_size'])
|
|
||||||
tmp_dir = volume_utils.image_conversion_dir()
|
|
||||||
f = tempfile.NamedTemporaryFile(prefix = 'luks_', dir = tmp_dir)
|
|
||||||
f.write(passphrase)
|
|
||||||
f.flush()
|
|
||||||
return (f, [
|
|
||||||
'--object', 'secret,id=luks_sec,format=raw,file=%(passfile)s' % {'passfile': f.name},
|
|
||||||
'-o', 'key-secret=luks_sec,cipher-alg=%(cipher_alg)s,cipher-mode=%(cipher_mode)s,ivgen-alg=%(ivgen_alg)s' % cipher_spec,
|
|
||||||
])
|
|
||||||
|
|
||||||
def create_snapshot(self, snapshot):
|
|
||||||
"""Creates a volume snapshot."""
|
|
||||||
|
|
||||||
vol_name = utils.convert_str(snapshot.volume_name)
|
|
||||||
snap_name = utils.convert_str(snapshot.name)
|
|
||||||
if snap_name.find('@') >= 0 or snap_name.find('/') >= 0:
|
|
||||||
raise exception.VolumeBackendAPIException(data = '@ and / are forbidden in volume and snapshot names')
|
|
||||||
self._create_snapshot(vol_name, vol_name+'@'+snap_name)
|
|
||||||
|
|
||||||
def snapshot_revert_use_temp_snapshot(self):
|
|
||||||
"""Disable the use of a temporary snapshot on revert."""
|
|
||||||
return False
|
|
||||||
|
|
||||||
def revert_to_snapshot(self, context, volume, snapshot):
|
|
||||||
"""Revert a volume to a given snapshot."""
|
|
||||||
|
|
||||||
vol_name = utils.convert_str(snapshot.volume_name)
|
|
||||||
snap_name = utils.convert_str(snapshot.name)
|
|
||||||
|
|
||||||
# Delete the image and recreate it from the snapshot
|
|
||||||
args = [ 'vitastor-cli', 'rm', vol_name, *(self._vitastor_args()) ]
|
|
||||||
try:
|
|
||||||
self._execute(*args)
|
|
||||||
except processutils.ProcessExecutionError as exc:
|
|
||||||
LOG.error("Failed to delete image "+vol_name+": "+exc)
|
|
||||||
raise exception.VolumeBackendAPIException(data = exc.stderr)
|
|
||||||
args = [
|
|
||||||
'vitastor-cli', 'create', '--parent', vol_name+'@'+snap_name,
|
|
||||||
vol_name, *(self._vitastor_args())
|
|
||||||
]
|
|
||||||
try:
|
|
||||||
self._execute(*args)
|
|
||||||
except processutils.ProcessExecutionError as exc:
|
|
||||||
LOG.error("Failed to recreate image "+vol_name+" from "+vol_name+"@"+snap_name+": "+exc)
|
|
||||||
raise exception.VolumeBackendAPIException(data = exc.stderr)
|
|
||||||
|
|
||||||
def delete_snapshot(self, snapshot):
|
|
||||||
"""Deletes a snapshot."""
|
|
||||||
|
|
||||||
vol_name = utils.convert_str(snapshot.volume_name)
|
|
||||||
snap_name = utils.convert_str(snapshot.name)
|
|
||||||
|
|
||||||
args = [
|
|
||||||
'vitastor-cli', 'rm', vol_name+'@'+snap_name,
|
|
||||||
*(self._vitastor_args())
|
|
||||||
]
|
|
||||||
try:
|
|
||||||
self._execute(*args)
|
|
||||||
except processutils.ProcessExecutionError as exc:
|
|
||||||
LOG.error("Failed to remove snapshot "+vol_name+'@'+snap_name+": "+exc)
|
|
||||||
raise exception.VolumeBackendAPIException(data = exc.stderr)
|
|
||||||
|
|
||||||
def _child_count(self, parents):
|
|
||||||
children = 0
|
|
||||||
def add_child(kv):
|
|
||||||
nonlocal children
|
|
||||||
children += self._check_parent(kv, parents)
|
|
||||||
self._etcd_foreach('config/inode', lambda kv: add_child(kv))
|
|
||||||
return children
|
|
||||||
|
|
||||||
def _check_parent(self, kv, parents):
|
|
||||||
if 'parent_id' not in kv['value']:
|
|
||||||
return 0
|
|
||||||
parent_id = kv['value']['parent_id']
|
|
||||||
_, _, pool_id, inode_id = kv['key'].split('/')
|
|
||||||
parent_pool_id = pool_id
|
|
||||||
if 'parent_pool_id' in kv['value'] and kv['value']['parent_pool_id']:
|
|
||||||
parent_pool_id = kv['value']['parent_pool_id']
|
|
||||||
inode = (int(pool_id) << 48) | (int(inode_id) & 0xffffffffffff)
|
|
||||||
parent = (int(parent_pool_id) << 48) | (int(parent_id) & 0xffffffffffff)
|
|
||||||
if parent in parents and inode not in parents:
|
|
||||||
return 1
|
|
||||||
return 0
|
|
||||||
|
|
||||||
def create_cloned_volume(self, volume, src_vref):
|
|
||||||
"""Create a cloned volume from another volume."""
|
|
||||||
|
|
||||||
size = int(volume.size) * units.Gi
|
|
||||||
src_name = utils.convert_str(src_vref.name)
|
|
||||||
dest_name = utils.convert_str(volume.name)
|
|
||||||
if dest_name.find('@') >= 0 or dest_name.find('/') >= 0:
|
|
||||||
raise exception.VolumeBackendAPIException(data = '@ and / are forbidden in volume and snapshot names')
|
|
||||||
|
|
||||||
# FIXME Do full copy if requested (cfg.disable_clone)
|
|
||||||
|
|
||||||
if src_vref.admin_metadata.get('readonly') == 'True':
|
|
||||||
# source volume is a volume-image cache entry or other readonly volume
|
|
||||||
# clone without intermediate snapshot
|
|
||||||
src = self._get_image(src_name)
|
|
||||||
LOG.debug("creating image '%s' from '%s'", dest_name, src_name)
|
|
||||||
new_cfg = self._create_image(dest_name, {
|
|
||||||
'size': size,
|
|
||||||
'parent_id': src['idx']['id'],
|
|
||||||
'parent_pool_id': src['idx']['pool_id'],
|
|
||||||
})
|
|
||||||
return {}
|
|
||||||
|
|
||||||
clone_snap = "%s@%s.clone_snap" % (src_name, dest_name)
|
|
||||||
make_img = True
|
|
||||||
if (volume.display_name and
|
|
||||||
volume.display_name.startswith('image-') and
|
|
||||||
src_vref.project_id != volume.project_id):
|
|
||||||
# idiotic openstack creates image-volume cache entries
|
|
||||||
# as clones of normal VM volumes... :-X prevent it :-D
|
|
||||||
clone_snap = dest_name
|
|
||||||
make_img = False
|
|
||||||
|
|
||||||
LOG.debug("creating layer '%s' under '%s'", clone_snap, src_name)
|
|
||||||
new_cfg = self._create_snapshot(src_name, clone_snap, True)
|
|
||||||
if make_img:
|
|
||||||
# Then create a clone from it
|
|
||||||
new_cfg = self._create_image(dest_name, {
|
|
||||||
'size': size,
|
|
||||||
'parent_id': new_cfg['parent_id'],
|
|
||||||
'parent_pool_id': new_cfg['parent_pool_id'],
|
|
||||||
})
|
|
||||||
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def create_volume_from_snapshot(self, volume, snapshot):
|
|
||||||
"""Creates a cloned volume from an existing snapshot."""
|
|
||||||
|
|
||||||
vol_name = utils.convert_str(volume.name)
|
|
||||||
snap_name = utils.convert_str(snapshot.name)
|
|
||||||
|
|
||||||
snap = self._get_image('volume-'+snapshot.volume_id+'@'+snap_name)
|
|
||||||
if not snap:
|
|
||||||
raise exception.SnapshotNotFound(snapshot_id = snap_name)
|
|
||||||
snap_inode_id = int(resp['responses'][0]['kvs'][0]['value']['id'])
|
|
||||||
snap_pool_id = int(resp['responses'][0]['kvs'][0]['value']['pool_id'])
|
|
||||||
|
|
||||||
size = snap['cfg']['size']
|
|
||||||
if int(volume.size):
|
|
||||||
size = int(volume.size) * units.Gi
|
|
||||||
new_cfg = self._create_image(vol_name, {
|
|
||||||
'size': size,
|
|
||||||
'parent_id': snap['idx']['id'],
|
|
||||||
'parent_pool_id': snap['idx']['pool_id'],
|
|
||||||
})
|
|
||||||
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def _vitastor_args(self):
|
|
||||||
args = []
|
|
||||||
for k in [ 'config_path', 'etcd_address', 'etcd_prefix' ]:
|
|
||||||
v = self.configuration.safe_get('vitastor_'+k)
|
|
||||||
if v:
|
|
||||||
args.extend(['--'+k, v])
|
|
||||||
return args
|
|
||||||
|
|
||||||
def _qemu_args(self):
|
|
||||||
args = ''
|
|
||||||
for k in [ 'config_path', 'etcd_address', 'etcd_prefix' ]:
|
|
||||||
v = self.configuration.safe_get('vitastor_'+k)
|
|
||||||
kk = k
|
|
||||||
if kk == 'etcd_address':
|
|
||||||
# FIXME use etcd_address in qemu driver
|
|
||||||
kk = 'etcd_host'
|
|
||||||
if v:
|
|
||||||
args += ':'+kk.replace('_', '-')+'='+v.replace(':', '\\:')
|
|
||||||
return args
|
|
||||||
|
|
||||||
def delete_volume(self, volume):
|
|
||||||
"""Deletes a logical volume."""
|
|
||||||
|
|
||||||
vol_name = utils.convert_str(volume.name)
|
|
||||||
|
|
||||||
# Find the volume and all its snapshots
|
|
||||||
range_end = b'index/image/' + vol_name.encode('utf-8')
|
|
||||||
range_end = range_end[0 : len(range_end)-1] + six.int2byte(range_end[len(range_end)-1] + 1)
|
|
||||||
resp = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': { 'key': 'index/image/'+vol_name, 'range_end': range_end } },
|
|
||||||
] })
|
|
||||||
if len(resp['responses'][0]['kvs']) == 0:
|
|
||||||
# already deleted
|
|
||||||
LOG.info("volume %s no longer exists in backend", vol_name)
|
|
||||||
return
|
|
||||||
layers = resp['responses'][0]['kvs']
|
|
||||||
layer_ids = {}
|
|
||||||
for kv in layers:
|
|
||||||
inode_id = int(kv['value']['id'])
|
|
||||||
pool_id = int(kv['value']['pool_id'])
|
|
||||||
inode_pool_id = (pool_id << 48) | (inode_id & 0xffffffffffff)
|
|
||||||
layer_ids[inode_pool_id] = True
|
|
||||||
|
|
||||||
# Check if the volume has clones and raise 'busy' if so
|
|
||||||
children = self._child_count(layer_ids)
|
|
||||||
if children > 0:
|
|
||||||
raise exception.VolumeIsBusy(volume_name = vol_name)
|
|
||||||
|
|
||||||
# Clear data
|
|
||||||
for kv in layers:
|
|
||||||
args = [
|
|
||||||
'vitastor-cli', 'rm-data', '--pool', str(kv['value']['pool_id']),
|
|
||||||
'--inode', str(kv['value']['id']), '--progress', '0',
|
|
||||||
*(self._vitastor_args())
|
|
||||||
]
|
|
||||||
try:
|
|
||||||
self._execute(*args)
|
|
||||||
except processutils.ProcessExecutionError as exc:
|
|
||||||
LOG.error("Failed to remove layer "+kv['key']+": "+exc)
|
|
||||||
raise exception.VolumeBackendAPIException(data = exc.stderr)
|
|
||||||
|
|
||||||
# Delete all layers from etcd
|
|
||||||
requests = []
|
|
||||||
for kv in layers:
|
|
||||||
requests.append({ 'request_delete_range': { 'key': kv['key'] } })
|
|
||||||
requests.append({ 'request_delete_range': { 'key': 'config/inode/'+str(kv['value']['pool_id'])+'/'+str(kv['value']['id']) } })
|
|
||||||
self._etcd_txn({ 'success': requests })
|
|
||||||
|
|
||||||
def retype(self, context, volume, new_type, diff, host):
|
|
||||||
"""Change extra type specifications for a volume."""
|
|
||||||
|
|
||||||
# FIXME Maybe (in the future) support multiple pools as different types
|
|
||||||
return True, {}
|
|
||||||
|
|
||||||
def ensure_export(self, context, volume):
|
|
||||||
"""Synchronously recreates an export for a logical volume."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def create_export(self, context, volume, connector):
|
|
||||||
"""Exports the volume."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def remove_export(self, context, volume):
|
|
||||||
"""Removes an export for a logical volume."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
def _create_image(self, vol_name, cfg):
|
|
||||||
pool_s = str(self.cfg['pool_id'])
|
|
||||||
image_id = 0
|
|
||||||
while image_id == 0:
|
|
||||||
# check if the image already exists and find a free ID
|
|
||||||
resp = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': { 'key': 'index/image/'+vol_name } },
|
|
||||||
{ 'request_range': { 'key': 'index/maxid/'+pool_s } },
|
|
||||||
] })
|
|
||||||
if len(resp['responses'][0]['kvs']) > 0:
|
|
||||||
# already exists
|
|
||||||
raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' already exists')
|
|
||||||
image_id, id_mod = self._next_id(resp['responses'][1])
|
|
||||||
# try to create the image
|
|
||||||
resp = self._etcd_txn({ 'compare': [
|
|
||||||
{ 'target': 'MOD', 'mod_revision': id_mod, 'key': 'index/maxid/'+pool_s },
|
|
||||||
{ 'target': 'VERSION', 'version': 0, 'key': 'index/image/'+vol_name },
|
|
||||||
{ 'target': 'VERSION', 'version': 0, 'key': 'config/inode/'+pool_s+'/'+str(image_id) },
|
|
||||||
], 'success': [
|
|
||||||
{ 'request_put': { 'key': 'index/maxid/'+pool_s, 'value': image_id } },
|
|
||||||
{ 'request_put': { 'key': 'index/image/'+vol_name, 'value': json.dumps({
|
|
||||||
'id': image_id, 'pool_id': self.cfg['pool_id']
|
|
||||||
}) } },
|
|
||||||
{ 'request_put': { 'key': 'config/inode/'+pool_s+'/'+str(image_id), 'value': json.dumps({
|
|
||||||
**cfg, 'name': vol_name,
|
|
||||||
}) } },
|
|
||||||
] })
|
|
||||||
if not resp.get('succeeded'):
|
|
||||||
# repeat
|
|
||||||
image_id = 0
|
|
||||||
|
|
||||||
def _create_snapshot(self, vol_name, snap_vol_name, allow_existing = False):
|
|
||||||
while True:
|
|
||||||
# check if the image already exists and snapshot doesn't
|
|
||||||
resp = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': { 'key': 'index/image/'+vol_name } },
|
|
||||||
{ 'request_range': { 'key': 'index/image/'+snap_vol_name } },
|
|
||||||
] })
|
|
||||||
if len(resp['responses'][0]['kvs']) == 0:
|
|
||||||
raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' does not exist')
|
|
||||||
if len(resp['responses'][1]['kvs']) > 0:
|
|
||||||
if allow_existing:
|
|
||||||
snap_idx = resp['responses'][1]['kvs'][0]['value']
|
|
||||||
resp = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': { 'key': 'config/inode/'+str(snap_idx['pool_id'])+'/'+str(snap_idx['id']) } },
|
|
||||||
] })
|
|
||||||
if len(resp['responses'][0]['kvs']) == 0:
|
|
||||||
raise exception.VolumeBackendAPIException(data =
|
|
||||||
'Volume '+snap_vol_name+' is already indexed, but does not exist'
|
|
||||||
)
|
|
||||||
return resp['responses'][0]['kvs'][0]['value']
|
|
||||||
raise exception.VolumeBackendAPIException(
|
|
||||||
data = 'Volume '+snap_vol_name+' already exists'
|
|
||||||
)
|
|
||||||
vol_idx = resp['responses'][0]['kvs'][0]['value']
|
|
||||||
vol_idx_mod = resp['responses'][0]['kvs'][0]['mod_revision']
|
|
||||||
# get image inode config and find a new ID
|
|
||||||
resp = self._etcd_txn({ 'success': [
|
|
||||||
{ 'request_range': { 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']) } },
|
|
||||||
{ 'request_range': { 'key': 'index/maxid/'+str(self.cfg['pool_id']) } },
|
|
||||||
] })
|
|
||||||
if len(resp['responses'][0]['kvs']) == 0:
|
|
||||||
raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' does not exist')
|
|
||||||
vol_cfg = resp['responses'][0]['kvs'][0]['value']
|
|
||||||
vol_mod = resp['responses'][0]['kvs'][0]['mod_revision']
|
|
||||||
new_id, id_mod = self._next_id(resp['responses'][1])
|
|
||||||
# try to redirect image to the new inode
|
|
||||||
new_cfg = {
|
|
||||||
**vol_cfg, 'name': vol_name, 'parent_id': vol_idx['id'], 'parent_pool_id': vol_idx['pool_id']
|
|
||||||
}
|
|
||||||
resp = self._etcd_txn({ 'compare': [
|
|
||||||
{ 'target': 'MOD', 'mod_revision': vol_idx_mod, 'key': 'index/image/'+vol_name },
|
|
||||||
{ 'target': 'MOD', 'mod_revision': vol_mod, 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']) },
|
|
||||||
{ 'target': 'MOD', 'mod_revision': id_mod, 'key': 'index/maxid/'+str(self.cfg['pool_id']) },
|
|
||||||
{ 'target': 'VERSION', 'version': 0, 'key': 'index/image/'+snap_vol_name },
|
|
||||||
{ 'target': 'VERSION', 'version': 0, 'key': 'config/inode/'+str(self.cfg['pool_id'])+'/'+str(new_id) },
|
|
||||||
], 'success': [
|
|
||||||
{ 'request_put': { 'key': 'index/maxid/'+str(self.cfg['pool_id']), 'value': new_id } },
|
|
||||||
{ 'request_put': { 'key': 'index/image/'+vol_name, 'value': json.dumps({
|
|
||||||
'id': new_id, 'pool_id': self.cfg['pool_id']
|
|
||||||
}) } },
|
|
||||||
{ 'request_put': { 'key': 'config/inode/'+str(self.cfg['pool_id'])+'/'+str(new_id), 'value': json.dumps(new_cfg) } },
|
|
||||||
{ 'request_put': { 'key': 'index/image/'+snap_vol_name, 'value': json.dumps({
|
|
||||||
'id': vol_idx['id'], 'pool_id': vol_idx['pool_id']
|
|
||||||
}) } },
|
|
||||||
{ 'request_put': { 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']), 'value': json.dumps({
|
|
||||||
**vol_cfg, 'name': snap_vol_name, 'readonly': True
|
|
||||||
}) } }
|
|
||||||
] })
|
|
||||||
if resp.get('succeeded'):
|
|
||||||
return new_cfg
|
|
||||||
|
|
||||||
def initialize_connection(self, volume, connector):
    """Return the connection info Nova/os-brick needs to attach the volume.

    :param volume: volume object being attached
    :param connector: host connector properties (unused by this driver)
    :returns: dict with driver_volume_type 'vitastor' and the data needed
        to locate the image (cluster connection settings and image name)
    """
    conf = self.configuration
    connection_info = {
        'driver_volume_type': 'vitastor',
        'data': {
            # Cluster connection parameters mirror the driver configuration.
            'config_path': conf.vitastor_config_path,
            'etcd_address': conf.vitastor_etcd_address,
            'etcd_prefix': conf.vitastor_etcd_prefix,
            # The image is addressed by name inside the Vitastor cluster.
            'name': volume.name,
            'logical_block_size': '512',
            'physical_block_size': '4096',
        }
    }
    LOG.debug('connection data: %s', connection_info)
    return connection_info
|
|
||||||
|
|
||||||
def terminate_connection(self, volume, connector, **kwargs):
    """No-op: Vitastor needs no per-connection teardown on detach."""
    pass
|
|
||||||
|
|
||||||
def clone_image(self, context, volume, image_location, image_meta, image_service):
    """Try to clone a Glance image that is backed by a cinder:// location.

    cinder://<volume-id> Glance locations point at existing Cinder volumes;
    when one matches this volume's type, a snapshot of it is used as the
    clone parent instead of downloading the image.

    :returns: ({}, True) when a clone was created, ({}, False) otherwise
    """
    if not image_location:
        return ({}, False)
    # image_location[0] is the glance direct_url; image_location[1] is the
    # list of all locations (including direct_url), or None when
    # show_multiple_locations is disabled in glance.
    if image_location[1]:
        candidates = [loc['url'] for loc in image_location[1]]
    else:
        candidates = [image_location[0]]
    prefix = 'cinder://'
    # Iterate all locations looking for a cloneable one.
    for url in candidates:
        if not url or not url.startswith(prefix):
            continue
        base_vol = self.db.volume_get(context, url[len(prefix):])
        if not base_vol or base_vol.volume_type_id != volume.volume_type_id:
            continue
        dest_name = utils.convert_str(volume.name)
        byte_size = int(volume.size) * units.Gi
        # Find or create the base snapshot, then clone from it.
        snap_cfg = self._create_snapshot(base_vol.name, base_vol.name + '@.clone_snap', True)
        self._create_image(dest_name, {
            'size': byte_size,
            'parent_id': snap_cfg['parent_id'],
            'parent_pool_id': snap_cfg['parent_pool_id'],
        })
        return ({}, True)
    return ({}, False)
|
|
||||||
|
|
||||||
def copy_image_to_encrypted_volume(self, context, volume, image_service, image_id):
    """Fetch a Glance image into an encrypted volume (LUKS-converted copy)."""
    self.copy_image_to_volume(context, volume, image_service, image_id, encrypted = True)
|
|
||||||
|
|
||||||
def copy_image_to_volume(self, context, volume, image_service, image_id, encrypted = False, disable_sparse = False):
    """Download a Glance image and write it into the Vitastor volume.

    The image is first fetched to a raw temporary file, then converted
    into the volume with qemu-img (re-encrypted as LUKS when requested).

    :param encrypted: convert to LUKS using the volume's encryption key
    :param disable_sparse: accepted for interface compatibility; unused
    """
    conv_dir = volume_utils.image_conversion_dir()
    with tempfile.NamedTemporaryFile(dir = conv_dir) as staging:
        image_utils.fetch_to_raw(
            context, image_service, image_id, staging.name,
            self.configuration.volume_dd_blocksize, size = volume.size
        )
        if encrypted:
            key_file, enc_opts = self._encrypt_opts(volume, context)
            target_format = [ '-O', 'luks', *enc_opts ]
        else:
            target_format = [ '-O', 'raw' ]
        dest_name = utils.convert_str(volume.name)
        # ':' must be escaped in qemu drive specifications.
        self._try_execute(
            'qemu-img', 'convert', '-f', 'raw', staging.name, *target_format,
            'vitastor:image=' + dest_name.replace(':', '\\:') + self._qemu_args()
        )
        if encrypted:
            key_file.close()
|
|
||||||
|
|
||||||
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Export the volume to a raw temporary file and upload it to Glance."""
    conv_dir = volume_utils.image_conversion_dir()
    staging_path = os.path.join(conv_dir, volume.name + '-' + image_meta['id'])
    with fileutils.remove_path_on_error(staging_path):
        src_name = utils.convert_str(volume.name)
        self._try_execute(
            'qemu-img', 'convert', '-f', 'raw',
            'vitastor:image=' + src_name.replace(':', '\\:') + self._qemu_args(),
            '-O', 'raw', staging_path
        )
        # FIXME: Copy directly if the destination image is also in Vitastor
        volume_utils.upload_volume(context, image_service, image_meta, staging_path, volume)
        os.unlink(staging_path)
|
|
||||||
|
|
||||||
def _get_image(self, vol_name):
    """Resolve an image name to its etcd index entry and inode config.

    :param vol_name: image name (may be '<volume>@<snapshot>')
    :returns: dict with 'idx'/'idx_mod' (index entry and its etcd mod
        revision) and 'cfg'/'cfg_mod' (inode config and revision), or
        None when either record is missing.
    """
    # Resolve the name through the image index.
    resp = self._etcd_txn({ 'success': [
        { 'request_range': { 'key': 'index/image/'+vol_name } },
    ] })
    kvs = resp['responses'][0]['kvs']
    if not kvs:
        return None
    vol_idx = kvs[0]['value']
    vol_idx_mod = kvs[0]['mod_revision']
    # Fetch the inode configuration the index points at.
    resp = self._etcd_txn({ 'success': [
        { 'request_range': { 'key': 'config/inode/'+str(vol_idx['pool_id'])+'/'+str(vol_idx['id']) } },
    ] })
    kvs = resp['responses'][0]['kvs']
    if not kvs:
        return None
    return {
        'cfg': kvs[0]['value'],
        'cfg_mod': kvs[0]['mod_revision'],
        'idx': vol_idx,
        'idx_mod': vol_idx_mod,
    }
|
|
||||||
|
|
||||||
def extend_volume(self, volume, new_size):
    """Extend an existing volume to new_size GB, retrying on etcd races."""
    vol_name = utils.convert_str(volume.name)
    target_bytes = int(new_size) * units.Gi
    while True:
        vol = self._get_image(vol_name)
        if not vol:
            raise exception.VolumeBackendAPIException(data = 'Volume '+vol_name+' does not exist')
        if vol['cfg']['size'] == target_bytes:
            # Already at the requested size.
            break
        inode_key = 'config/inode/'+str(vol['idx']['pool_id'])+'/'+str(vol['idx']['id'])
        # Compare-and-swap on the inode config revision so that a
        # concurrent update simply makes us re-read and retry.
        resp = self._etcd_txn({ 'compare': [ {
            'target': 'MOD',
            'mod_revision': vol['cfg_mod'],
            'key': inode_key,
        } ], 'success': [
            { 'request_put': {
                'key': inode_key,
                'value': json.dumps({ **vol['cfg'], 'size': target_bytes }),
            } },
        ] })
        if resp.get('succeeded'):
            break
    LOG.debug(
        "Extend volume from %(old_size)s GB to %(new_size)s GB.",
        {'old_size': volume.size, 'new_size': new_size}
    )
|
|
||||||
|
|
||||||
def _add_manageable_volume(self, kv, manageable_volumes, cinder_ids):
    """Append a manageable-volume report entry for one inode config KV.

    :param kv: etcd key/value pair; kv['value'] is the parsed inode config
    :param manageable_volumes: list accumulating report entries
    :param cinder_ids: ids of volumes already managed by Cinder
    """
    cfg = kv['value']
    if kv['key'].find('@') >= 0:
        # snapshot entries are not manageable as volumes
        return
    # FIX: image_name was previously referenced without ever being
    # defined, raising NameError on every listing; the source-name
    # reference is the Vitastor image name from the inode config.
    image_name = cfg['name']
    image_id = volume_utils.extract_id_from_volume_name(image_name)
    image_info = {
        'reference': {'source-name': image_name},
        # size reported in whole GB, rounded up
        'size': int(math.ceil(float(cfg['size']) / units.Gi)),
        'cinder_id': None,
        'extra_info': None,
    }
    if image_id in cinder_ids:
        image_info['cinder_id'] = image_id
        image_info['safe_to_manage'] = False
        image_info['reason_not_safe'] = 'already managed'
    else:
        image_info['safe_to_manage'] = True
        image_info['reason_not_safe'] = None
    manageable_volumes.append(image_info)
|
|
||||||
|
|
||||||
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs):
    """List Vitastor images in our pool that Cinder could manage."""
    entries = []
    managed_ids = [vol['id'] for vol in cinder_volumes]
    # Walk every inode config in the configured pool.
    # FIXME: It's possible to use pagination in our case, but.. do we want it?
    self._etcd_foreach(
        'config/inode/'+str(self.cfg['pool_id']),
        lambda kv: self._add_manageable_volume(kv, entries, managed_ids))
    return volume_utils.paginate_entries_list(
        entries, marker, limit, offset, sort_keys, sort_dirs)
|
|
||||||
|
|
||||||
def _get_existing_name(self, existing_ref):
    """Extract and validate the 'source-name' from a manage_existing ref.

    FIX: the method was defined without 'self' although every call site
    in this driver invokes it as self._get_existing_name(existing_ref),
    which made each call raise TypeError; the missing parameter is added.
    (If a lost @staticmethod decorator was intended instead, the effect
    for callers is the same.)

    :param existing_ref: {'source-name': <name>} dict, or a bare name
    :returns: the non-empty source name, normalized via utils.convert_str
    :raises ManageExistingInvalidReference: when no usable name is present
    """
    if not isinstance(existing_ref, dict):
        existing_ref = {"source-name": existing_ref}
    if 'source-name' not in existing_ref:
        reason = _('Reference must contain source-name element.')
        raise exception.ManageExistingInvalidReference(existing_ref=existing_ref, reason=reason)
    src_name = utils.convert_str(existing_ref['source-name'])
    if not src_name:
        reason = _('Reference must contain source-name element.')
        raise exception.ManageExistingInvalidReference(existing_ref=existing_ref, reason=reason)
    return src_name
|
|
||||||
|
|
||||||
def manage_existing_get_size(self, volume, existing_ref):
    """Return size of an existing image for manage_existing, in whole GB.

    :param volume: volume ref info to be set
    :param existing_ref: {'source-name': <image name>}
    :raises VolumeBackendAPIException: when the image does not exist
    """
    name = self._get_existing_name(existing_ref)
    img = self._get_image(name)
    if not img:
        raise exception.VolumeBackendAPIException(data = 'Volume '+name+' does not exist')
    return int(math.ceil(float(img['cfg']['size']) / units.Gi))
|
|
||||||
|
|
||||||
def manage_existing(self, volume, existing_ref):
    """Manage an existing image by renaming it to the expected volume name.

    :param volume: volume ref info to be set
    :param existing_ref: {'source-name': <image name>}
    """
    source = self._get_existing_name(existing_ref)
    target = utils.convert_str(volume.name)
    self._rename(source, target)
|
|
||||||
|
|
||||||
def _rename(self, from_name, to_name):
    """Atomically rename an image in etcd, retrying on revision conflicts.

    Rewrites both the index entry and the inode config in one
    transaction; fails if the source disappears or the target name is
    already taken.
    """
    while True:
        src = self._get_image(from_name)
        if not src:
            raise exception.VolumeBackendAPIException(data = 'Volume '+from_name+' does not exist')
        if self._get_image(to_name):
            raise exception.VolumeBackendAPIException(data = 'Volume '+to_name+' already exists')
        inode_key = 'config/inode/'+str(src['idx']['pool_id'])+'/'+str(src['idx']['id'])
        resp = self._etcd_txn({ 'compare': [
            # neither the index nor the config changed since we read them...
            { 'target': 'MOD', 'mod_revision': src['idx_mod'], 'key': 'index/image/'+src['cfg']['name'] },
            { 'target': 'MOD', 'mod_revision': src['cfg_mod'], 'key': inode_key },
            # ...and the destination name is still unused
            { 'target': 'VERSION', 'version': 0, 'key': 'index/image/'+to_name },
        ], 'success': [
            { 'request_delete_range': { 'key': 'index/image/'+src['cfg']['name'] } },
            { 'request_put': { 'key': 'index/image/'+to_name, 'value': json.dumps(src['idx']) } },
            { 'request_put': { 'key': inode_key,
                'value': json.dumps({ **src['cfg'], 'name': to_name }) } },
        ] })
        if resp.get('succeeded'):
            break
|
|
||||||
|
|
||||||
def unmanage(self, volume):
    """No-op: the Vitastor image is left in place when unmanaged."""
    pass
|
|
||||||
|
|
||||||
def _add_manageable_snapshot(self, kv, manageable_snapshots, cinder_ids):
    """Append a manageable-snapshot report entry for one inode config KV.

    Entries whose key contains no '@' are plain volumes and are skipped.
    NOTE(review): names are parsed out of kv['key'] here, whereas
    _add_manageable_volume reads cfg['name'] — confirm which field
    actually carries the '<volume>@<snapshot>' form.
    """
    cfg = kv['value']
    at_pos = kv['key'].find('@')
    if at_pos < 0:
        # not a snapshot — plain volume
        return
    image_name = kv['key'][0 : at_pos]
    snap_name = kv['key'][at_pos+1 : ]
    snapshot_id = volume_utils.extract_id_from_snapshot_name(snap_name)
    snapshot_info = {
        'reference': {'source-name': snap_name},
        # size reported in whole GB, rounded up
        'size': int(math.ceil(float(cfg['size']) / units.Gi)),
        'cinder_id': None,
        'extra_info': None,
        'safe_to_manage': False,
        'reason_not_safe': None,
        'source_reference': {'source-name': image_name}
    }
    if snapshot_id in cinder_ids:
        # Exclude snapshots already managed.
        snapshot_info['cinder_id'] = snapshot_id
        snapshot_info['reason_not_safe'] = ('already managed')
    elif snap_name.endswith('.clone_snap'):
        # Exclude clone snapshot.
        snapshot_info['reason_not_safe'] = ('used for clone snap')
    else:
        snapshot_info['safe_to_manage'] = True
    manageable_snapshots.append(snapshot_info)
|
|
||||||
|
|
||||||
def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs):
    """List manageable snapshots in Vitastor."""
    manageable_snapshots = []
    cinder_snapshot_ids = [resource['id'] for resource in cinder_snapshots]
    # List all volumes
    # FIXME: It's possible to use pagination in our case, but.. do we want it?
    # FIX: this previously passed _add_manageable_volume, which skips every
    # snapshot entry and reports the remaining volumes in volume format;
    # snapshots must be collected with _add_manageable_snapshot.
    self._etcd_foreach('config/inode/'+str(self.cfg['pool_id']),
        lambda kv: self._add_manageable_snapshot(kv, manageable_snapshots, cinder_snapshot_ids))
    return volume_utils.paginate_entries_list(
        manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs)
|
|
||||||
|
|
||||||
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
    """Return size of an existing snapshot for manage_existing, in whole GB.

    :param snapshot: snapshot ref info to be set
    :param existing_ref: {'source-name': <name of snapshot>}
    :raises ManageExistingInvalidReference: when the snapshot is missing
    """
    vol_name = utils.convert_str(snapshot.volume_name)
    snap_name = self._get_existing_name(existing_ref)
    vol = self._get_image(vol_name+'@'+snap_name)
    if not vol:
        # FIX: the error path referenced an undefined 'snapshot_name'
        # (NameError masked the intended exception); report the reference
        # that was actually looked up.
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason='Specified snapshot does not exist.'
        )
    return int(math.ceil(float(vol['cfg']['size']) / units.Gi))
|
|
||||||
|
|
||||||
def manage_existing_snapshot(self, snapshot, existing_ref):
    """Manage an existing snapshot by renaming it to the expected name.

    Error checking done by manage_existing_get_size is not repeated.

    :param snapshot: snapshot ref info to be set
    :param existing_ref: {'source-name': <name of snapshot>}
    """
    parent = utils.convert_str(snapshot.volume_name)
    current = parent + '@' + self._get_existing_name(existing_ref)
    wanted = parent + '@' + utils.convert_str(snapshot.name)
    self._rename(current, wanted)
|
|
||||||
|
|
||||||
def unmanage_snapshot(self, snapshot):
    """Removes the specified snapshot from Cinder management."""
    pass
|
|
||||||
|
|
||||||
def _dumps(self, obj):
|
|
||||||
return json.dumps(obj, separators=(',', ':'), sort_keys=True)
|
|
|
@ -1,643 +0,0 @@
|
||||||
commit 1f7e90e36b2afca0312392979b96d31951a8d66b
|
|
||||||
Author: Vitaliy Filippov <vitalif@yourcmc.ru>
|
|
||||||
Date: Thu Jun 27 01:34:54 2024 +0300
|
|
||||||
|
|
||||||
Add Vitastor support
|
|
||||||
|
|
||||||
diff --git a/include/libvirt/libvirt-storage.h b/include/libvirt/libvirt-storage.h
|
|
||||||
index aaad4a3da1..5f5daa8341 100644
|
|
||||||
--- a/include/libvirt/libvirt-storage.h
|
|
||||||
+++ b/include/libvirt/libvirt-storage.h
|
|
||||||
@@ -326,6 +326,7 @@ typedef enum {
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS = 1 << 17, /* (Since: 1.2.8) */
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE = 1 << 18, /* (Since: 3.1.0) */
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI_DIRECT = 1 << 19, /* (Since: 5.6.0) */
|
|
||||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR = 1 << 20, /* (Since: 5.0.0) */
|
|
||||||
} virConnectListAllStoragePoolsFlags;
|
|
||||||
|
|
||||||
int virConnectListAllStoragePools(virConnectPtr conn,
|
|
||||||
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
|
|
||||||
index fde594f811..66537db3e3 100644
|
|
||||||
--- a/src/conf/domain_conf.c
|
|
||||||
+++ b/src/conf/domain_conf.c
|
|
||||||
@@ -7220,7 +7220,8 @@ virDomainDiskSourceNetworkParse(xmlNodePtr node,
|
|
||||||
src->configFile = virXPathString("string(./config/@file)", ctxt);
|
|
||||||
|
|
||||||
if (src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTP ||
|
|
||||||
- src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS)
|
|
||||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_HTTPS ||
|
|
||||||
+ src->protocol == VIR_STORAGE_NET_PROTOCOL_VITASTOR)
|
|
||||||
src->query = virXMLPropString(node, "query");
|
|
||||||
|
|
||||||
if (virDomainStorageNetworkParseHosts(node, ctxt, &src->hosts, &src->nhosts) < 0)
|
|
||||||
@@ -30734,6 +30735,7 @@ virDomainStorageSourceTranslateSourcePool(virStorageSource *src,
|
|
||||||
|
|
||||||
case VIR_STORAGE_POOL_MPATH:
|
|
||||||
case VIR_STORAGE_POOL_RBD:
|
|
||||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_POOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_POOL_LAST:
|
|
||||||
diff --git a/src/conf/domain_validate.c b/src/conf/domain_validate.c
|
|
||||||
index 395e036e8f..8a0190f85b 100644
|
|
||||||
--- a/src/conf/domain_validate.c
|
|
||||||
+++ b/src/conf/domain_validate.c
|
|
||||||
@@ -495,6 +495,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
||||||
break;
|
|
||||||
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
|
||||||
@@ -541,7 +542,7 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
- /* internal snapshots and config files are currently supported only with rbd: */
|
|
||||||
+ /* internal snapshots are currently supported only with rbd: */
|
|
||||||
if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
|
||||||
src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD) {
|
|
||||||
if (src->snapshot) {
|
|
||||||
@@ -549,10 +550,15 @@ virDomainDiskDefValidateSourceChainOne(const virStorageSource *src)
|
|
||||||
_("<snapshot> element is currently supported only with 'rbd' disks"));
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
+ }
|
|
||||||
|
|
||||||
+ /* config files are currently supported only with rbd and vitastor: */
|
|
||||||
+ if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK &&
|
|
||||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_RBD &&
|
|
||||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR) {
|
|
||||||
if (src->configFile) {
|
|
||||||
virReportError(VIR_ERR_XML_ERROR, "%s",
|
|
||||||
- _("<config> element is currently supported only with 'rbd' disks"));
|
|
||||||
+ _("<config> element is currently supported only with 'rbd' and 'vitastor' disks"));
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng
|
|
||||||
index a46a824f88..4c5b720643 100644
|
|
||||||
--- a/src/conf/schemas/domaincommon.rng
|
|
||||||
+++ b/src/conf/schemas/domaincommon.rng
|
|
||||||
@@ -1997,6 +1997,35 @@
|
|
||||||
</element>
|
|
||||||
</define>
|
|
||||||
|
|
||||||
+ <define name="diskSourceNetworkProtocolVitastor">
|
|
||||||
+ <element name="source">
|
|
||||||
+ <interleave>
|
|
||||||
+ <attribute name="protocol">
|
|
||||||
+ <value>vitastor</value>
|
|
||||||
+ </attribute>
|
|
||||||
+ <ref name="diskSourceCommon"/>
|
|
||||||
+ <optional>
|
|
||||||
+ <attribute name="name"/>
|
|
||||||
+ </optional>
|
|
||||||
+ <optional>
|
|
||||||
+ <attribute name="query"/>
|
|
||||||
+ </optional>
|
|
||||||
+ <zeroOrMore>
|
|
||||||
+ <ref name="diskSourceNetworkHost"/>
|
|
||||||
+ </zeroOrMore>
|
|
||||||
+ <optional>
|
|
||||||
+ <element name="config">
|
|
||||||
+ <attribute name="file">
|
|
||||||
+ <ref name="absFilePath"/>
|
|
||||||
+ </attribute>
|
|
||||||
+ <empty/>
|
|
||||||
+ </element>
|
|
||||||
+ </optional>
|
|
||||||
+ <empty/>
|
|
||||||
+ </interleave>
|
|
||||||
+ </element>
|
|
||||||
+ </define>
|
|
||||||
+
|
|
||||||
<define name="diskSourceNetworkProtocolISCSI">
|
|
||||||
<element name="source">
|
|
||||||
<attribute name="protocol">
|
|
||||||
@@ -2347,6 +2376,7 @@
|
|
||||||
<ref name="diskSourceNetworkProtocolSimple"/>
|
|
||||||
<ref name="diskSourceNetworkProtocolVxHS"/>
|
|
||||||
<ref name="diskSourceNetworkProtocolNFS"/>
|
|
||||||
+ <ref name="diskSourceNetworkProtocolVitastor"/>
|
|
||||||
</choice>
|
|
||||||
</define>
|
|
||||||
|
|
||||||
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
|
|
||||||
index 68842004b7..1d69a788b6 100644
|
|
||||||
--- a/src/conf/storage_conf.c
|
|
||||||
+++ b/src/conf/storage_conf.c
|
|
||||||
@@ -56,7 +56,7 @@ VIR_ENUM_IMPL(virStoragePool,
|
|
||||||
"logical", "disk", "iscsi",
|
|
||||||
"iscsi-direct", "scsi", "mpath",
|
|
||||||
"rbd", "sheepdog", "gluster",
|
|
||||||
- "zfs", "vstorage",
|
|
||||||
+ "zfs", "vstorage", "vitastor",
|
|
||||||
);
|
|
||||||
|
|
||||||
VIR_ENUM_IMPL(virStoragePoolFormatFileSystem,
|
|
||||||
@@ -242,6 +242,18 @@ static virStoragePoolTypeInfo poolTypeInfo[] = {
|
|
||||||
.formatToString = virStorageFileFormatTypeToString,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
+ {.poolType = VIR_STORAGE_POOL_VITASTOR,
|
|
||||||
+ .poolOptions = {
|
|
||||||
+ .flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
|
||||||
+ VIR_STORAGE_POOL_SOURCE_NETWORK |
|
|
||||||
+ VIR_STORAGE_POOL_SOURCE_NAME),
|
|
||||||
+ },
|
|
||||||
+ .volOptions = {
|
|
||||||
+ .defaultFormat = VIR_STORAGE_FILE_RAW,
|
|
||||||
+ .formatFromString = virStorageVolumeFormatFromString,
|
|
||||||
+ .formatToString = virStorageFileFormatTypeToString,
|
|
||||||
+ }
|
|
||||||
+ },
|
|
||||||
{.poolType = VIR_STORAGE_POOL_SHEEPDOG,
|
|
||||||
.poolOptions = {
|
|
||||||
.flags = (VIR_STORAGE_POOL_SOURCE_HOST |
|
|
||||||
@@ -538,6 +550,11 @@ virStoragePoolDefParseSource(xmlXPathContextPtr ctxt,
|
|
||||||
_("element 'name' is mandatory for RBD pool"));
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
+ if (pool_type == VIR_STORAGE_POOL_VITASTOR && source->name == NULL) {
|
|
||||||
+ virReportError(VIR_ERR_XML_ERROR, "%s",
|
|
||||||
+ _("element 'name' is mandatory for Vitastor pool"));
|
|
||||||
+ return -1;
|
|
||||||
+ }
|
|
||||||
|
|
||||||
if (options->formatFromString) {
|
|
||||||
g_autofree char *format = NULL;
|
|
||||||
@@ -1127,6 +1144,7 @@ virStoragePoolDefFormatBuf(virBuffer *buf,
|
|
||||||
/* RBD, Sheepdog, Gluster and Iscsi-direct devices are not local block devs nor
|
|
||||||
* files, so they don't have a target */
|
|
||||||
if (def->type != VIR_STORAGE_POOL_RBD &&
|
|
||||||
+ def->type != VIR_STORAGE_POOL_VITASTOR &&
|
|
||||||
def->type != VIR_STORAGE_POOL_SHEEPDOG &&
|
|
||||||
def->type != VIR_STORAGE_POOL_GLUSTER &&
|
|
||||||
def->type != VIR_STORAGE_POOL_ISCSI_DIRECT) {
|
|
||||||
diff --git a/src/conf/storage_conf.h b/src/conf/storage_conf.h
|
|
||||||
index fc67957cfe..720c07ef74 100644
|
|
||||||
--- a/src/conf/storage_conf.h
|
|
||||||
+++ b/src/conf/storage_conf.h
|
|
||||||
@@ -103,6 +103,7 @@ typedef enum {
|
|
||||||
VIR_STORAGE_POOL_GLUSTER, /* Gluster device */
|
|
||||||
VIR_STORAGE_POOL_ZFS, /* ZFS */
|
|
||||||
VIR_STORAGE_POOL_VSTORAGE, /* Virtuozzo Storage */
|
|
||||||
+ VIR_STORAGE_POOL_VITASTOR, /* Vitastor */
|
|
||||||
|
|
||||||
VIR_STORAGE_POOL_LAST,
|
|
||||||
} virStoragePoolType;
|
|
||||||
@@ -454,6 +455,7 @@ VIR_ENUM_DECL(virStoragePartedFs);
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_SCSI | \
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_MPATH | \
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_RBD | \
|
|
||||||
+ VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR | \
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG | \
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER | \
|
|
||||||
VIR_CONNECT_LIST_STORAGE_POOLS_ZFS | \
|
|
||||||
diff --git a/src/conf/storage_source_conf.c b/src/conf/storage_source_conf.c
|
|
||||||
index 959ec5ed40..e751dd4d6a 100644
|
|
||||||
--- a/src/conf/storage_source_conf.c
|
|
||||||
+++ b/src/conf/storage_source_conf.c
|
|
||||||
@@ -88,6 +88,7 @@ VIR_ENUM_IMPL(virStorageNetProtocol,
|
|
||||||
"ssh",
|
|
||||||
"vxhs",
|
|
||||||
"nfs",
|
|
||||||
+ "vitastor",
|
|
||||||
);
|
|
||||||
|
|
||||||
|
|
||||||
@@ -1301,6 +1302,7 @@ virStorageSourceNetworkDefaultPort(virStorageNetProtocol protocol)
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
|
||||||
return 24007;
|
|
||||||
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
||||||
/* we don't provide a default for RBD */
|
|
||||||
return 0;
|
|
||||||
diff --git a/src/conf/storage_source_conf.h b/src/conf/storage_source_conf.h
|
|
||||||
index 05b4bda16c..b5ed143c39 100644
|
|
||||||
--- a/src/conf/storage_source_conf.h
|
|
||||||
+++ b/src/conf/storage_source_conf.h
|
|
||||||
@@ -129,6 +129,7 @@ typedef enum {
|
|
||||||
VIR_STORAGE_NET_PROTOCOL_SSH,
|
|
||||||
VIR_STORAGE_NET_PROTOCOL_VXHS,
|
|
||||||
VIR_STORAGE_NET_PROTOCOL_NFS,
|
|
||||||
+ VIR_STORAGE_NET_PROTOCOL_VITASTOR,
|
|
||||||
|
|
||||||
VIR_STORAGE_NET_PROTOCOL_LAST
|
|
||||||
} virStorageNetProtocol;
|
|
||||||
diff --git a/src/conf/virstorageobj.c b/src/conf/virstorageobj.c
|
|
||||||
index 59fa5da372..4739167f5f 100644
|
|
||||||
--- a/src/conf/virstorageobj.c
|
|
||||||
+++ b/src/conf/virstorageobj.c
|
|
||||||
@@ -1438,6 +1438,7 @@ virStoragePoolObjSourceFindDuplicateCb(const void *payload,
|
|
||||||
return 1;
|
|
||||||
break;
|
|
||||||
|
|
||||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
|
||||||
case VIR_STORAGE_POOL_RBD:
|
|
||||||
case VIR_STORAGE_POOL_LAST:
|
|
||||||
@@ -1921,6 +1922,8 @@ virStoragePoolObjMatch(virStoragePoolObj *obj,
|
|
||||||
(obj->def->type == VIR_STORAGE_POOL_MPATH)) ||
|
|
||||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_RBD) &&
|
|
||||||
(obj->def->type == VIR_STORAGE_POOL_RBD)) ||
|
|
||||||
+ (MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR) &&
|
|
||||||
+ (obj->def->type == VIR_STORAGE_POOL_VITASTOR)) ||
|
|
||||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG) &&
|
|
||||||
(obj->def->type == VIR_STORAGE_POOL_SHEEPDOG)) ||
|
|
||||||
(MATCH(VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER) &&
|
|
||||||
diff --git a/src/libvirt-storage.c b/src/libvirt-storage.c
|
|
||||||
index db7660aac4..561df34709 100644
|
|
||||||
--- a/src/libvirt-storage.c
|
|
||||||
+++ b/src/libvirt-storage.c
|
|
||||||
@@ -94,6 +94,7 @@ virStoragePoolGetConnect(virStoragePoolPtr pool)
|
|
||||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SCSI
|
|
||||||
* VIR_CONNECT_LIST_STORAGE_POOLS_MPATH
|
|
||||||
* VIR_CONNECT_LIST_STORAGE_POOLS_RBD
|
|
||||||
+ * VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR
|
|
||||||
* VIR_CONNECT_LIST_STORAGE_POOLS_SHEEPDOG
|
|
||||||
* VIR_CONNECT_LIST_STORAGE_POOLS_GLUSTER
|
|
||||||
* VIR_CONNECT_LIST_STORAGE_POOLS_ZFS
|
|
||||||
diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
|
|
||||||
index 62e1be6672..71a1d42896 100644
|
|
||||||
--- a/src/libxl/libxl_conf.c
|
|
||||||
+++ b/src/libxl/libxl_conf.c
|
|
||||||
@@ -979,6 +979,7 @@ libxlMakeNetworkDiskSrcStr(virStorageSource *src,
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
|
||||||
virReportError(VIR_ERR_NO_SUPPORT,
|
|
||||||
diff --git a/src/libxl/xen_xl.c b/src/libxl/xen_xl.c
|
|
||||||
index 53f6871efc..c34b8cee1a 100644
|
|
||||||
--- a/src/libxl/xen_xl.c
|
|
||||||
+++ b/src/libxl/xen_xl.c
|
|
||||||
@@ -1456,6 +1456,7 @@ xenFormatXLDiskSrcNet(virStorageSource *src)
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
|
||||||
virReportError(VIR_ERR_NO_SUPPORT,
|
|
||||||
diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c
|
|
||||||
index 738b72d7ea..5dd082fc89 100644
|
|
||||||
--- a/src/qemu/qemu_block.c
|
|
||||||
+++ b/src/qemu/qemu_block.c
|
|
||||||
@@ -758,6 +758,38 @@ qemuBlockStorageSourceGetRBDProps(virStorageSource *src,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
+static virJSONValue *
|
|
||||||
+qemuBlockStorageSourceGetVitastorProps(virStorageSource *src)
|
|
||||||
+{
|
|
||||||
+ virJSONValue *ret = NULL;
|
|
||||||
+ virStorageNetHostDef *host;
|
|
||||||
+ size_t i;
|
|
||||||
+ g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
|
|
||||||
+ g_autofree char *etcd = NULL;
|
|
||||||
+
|
|
||||||
+ for (i = 0; i < src->nhosts; i++) {
|
|
||||||
+ host = src->hosts + i;
|
|
||||||
+ if ((virStorageNetHostTransport)host->transport != VIR_STORAGE_NET_HOST_TRANS_TCP) {
|
|
||||||
+ return NULL;
|
|
||||||
+ }
|
|
||||||
+ virBufferAsprintf(&buf, i > 0 ? ",%s:%u" : "%s:%u", host->name, host->port);
|
|
||||||
+ }
|
|
||||||
+ if (src->nhosts > 0) {
|
|
||||||
+ etcd = virBufferContentAndReset(&buf);
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ if (virJSONValueObjectAdd(&ret,
|
|
||||||
+ "S:etcd-host", etcd,
|
|
||||||
+ "S:etcd-prefix", src->query,
|
|
||||||
+ "S:config-path", src->configFile,
|
|
||||||
+ "s:image", src->path,
|
|
||||||
+ NULL) < 0)
|
|
||||||
+ return NULL;
|
|
||||||
+
|
|
||||||
+ return ret;
|
|
||||||
+}
|
|
||||||
+
|
|
||||||
+
|
|
||||||
static virJSONValue *
|
|
||||||
qemuBlockStorageSourceGetSheepdogProps(virStorageSource *src)
|
|
||||||
{
|
|
||||||
@@ -1140,6 +1172,12 @@ qemuBlockStorageSourceGetBackendProps(virStorageSource *src,
|
|
||||||
return NULL;
|
|
||||||
break;
|
|
||||||
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
+ driver = "vitastor";
|
|
||||||
+ if (!(fileprops = qemuBlockStorageSourceGetVitastorProps(src)))
|
|
||||||
+ return NULL;
|
|
||||||
+ break;
|
|
||||||
+
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
driver = "sheepdog";
|
|
||||||
if (!(fileprops = qemuBlockStorageSourceGetSheepdogProps(src)))
|
|
||||||
@@ -2020,6 +2058,7 @@ qemuBlockGetBackingStoreString(virStorageSource *src,
|
|
||||||
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NFS:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
|
||||||
@@ -2400,6 +2439,12 @@ qemuBlockStorageSourceCreateGetStorageProps(virStorageSource *src,
|
|
||||||
return -1;
|
|
||||||
break;
|
|
||||||
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
+ driver = "vitastor";
|
|
||||||
+ if (!(location = qemuBlockStorageSourceGetVitastorProps(src)))
|
|
||||||
+ return -1;
|
|
||||||
+ break;
|
|
||||||
+
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
driver = "sheepdog";
|
|
||||||
if (!(location = qemuBlockStorageSourceGetSheepdogProps(src)))
|
|
||||||
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
|
|
||||||
index bda62f2e5c..84b4e5f2b8 100644
|
|
||||||
--- a/src/qemu/qemu_domain.c
|
|
||||||
+++ b/src/qemu/qemu_domain.c
|
|
||||||
@@ -5260,7 +5260,8 @@ qemuDomainValidateStorageSource(virStorageSource *src,
|
|
||||||
if (src->query &&
|
|
||||||
(actualType != VIR_STORAGE_TYPE_NETWORK ||
|
|
||||||
(src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
|
|
||||||
- src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP))) {
|
|
||||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
|
|
||||||
+ src->protocol != VIR_STORAGE_NET_PROTOCOL_VITASTOR))) {
|
|
||||||
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
||||||
_("query is supported only with HTTP(S) protocols"));
|
|
||||||
return -1;
|
|
||||||
@@ -10514,6 +10515,7 @@ qemuDomainPrepareStorageSourceTLS(virStorageSource *src,
|
|
||||||
break;
|
|
||||||
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
|
||||||
diff --git a/src/qemu/qemu_snapshot.c b/src/qemu/qemu_snapshot.c
|
|
||||||
index f5260c4a22..2f9d8406fe 100644
|
|
||||||
--- a/src/qemu/qemu_snapshot.c
|
|
||||||
+++ b/src/qemu/qemu_snapshot.c
|
|
||||||
@@ -423,6 +423,7 @@ qemuSnapshotPrepareDiskExternalInactive(virDomainSnapshotDiskDef *snapdisk,
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
|
||||||
@@ -648,6 +649,7 @@ qemuSnapshotPrepareDiskInternal(virDomainDiskDef *disk,
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
|
||||||
diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c
|
|
||||||
index 86c03762d2..630c6eff1a 100644
|
|
||||||
--- a/src/storage/storage_driver.c
|
|
||||||
+++ b/src/storage/storage_driver.c
|
|
||||||
@@ -1626,6 +1626,7 @@ storageVolLookupByPathCallback(virStoragePoolObj *obj,
|
|
||||||
|
|
||||||
case VIR_STORAGE_POOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_POOL_RBD:
|
|
||||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_POOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_POOL_ZFS:
|
|
||||||
case VIR_STORAGE_POOL_LAST:
|
|
||||||
diff --git a/src/storage_file/storage_source_backingstore.c b/src/storage_file/storage_source_backingstore.c
|
|
||||||
index 80681924ea..8a3ade9ec0 100644
|
|
||||||
--- a/src/storage_file/storage_source_backingstore.c
|
|
||||||
+++ b/src/storage_file/storage_source_backingstore.c
|
|
||||||
@@ -287,6 +287,75 @@ virStorageSourceParseRBDColonString(const char *rbdstr,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
+static int
|
|
||||||
+virStorageSourceParseVitastorColonString(const char *colonstr,
|
|
||||||
+ virStorageSource *src)
|
|
||||||
+{
|
|
||||||
+ char *p, *e, *next;
|
|
||||||
+ g_autofree char *options = NULL;
|
|
||||||
+
|
|
||||||
+ /* optionally skip the "vitastor:" prefix if provided */
|
|
||||||
+ if (STRPREFIX(colonstr, "vitastor:"))
|
|
||||||
+ colonstr += strlen("vitastor:");
|
|
||||||
+
|
|
||||||
+ options = g_strdup(colonstr);
|
|
||||||
+
|
|
||||||
+ p = options;
|
|
||||||
+ while (*p) {
|
|
||||||
+ /* find : delimiter or end of string */
|
|
||||||
+ for (e = p; *e && *e != ':'; ++e) {
|
|
||||||
+ if (*e == '\\') {
|
|
||||||
+ e++;
|
|
||||||
+ if (*e == '\0')
|
|
||||||
+ break;
|
|
||||||
+ }
|
|
||||||
+ }
|
|
||||||
+ if (*e == '\0') {
|
|
||||||
+ next = e; /* last kv pair */
|
|
||||||
+ } else {
|
|
||||||
+ next = e + 1;
|
|
||||||
+ *e = '\0';
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ if (STRPREFIX(p, "image=")) {
|
|
||||||
+ src->path = g_strdup(p + strlen("image="));
|
|
||||||
+ } else if (STRPREFIX(p, "etcd-prefix=")) {
|
|
||||||
+ src->query = g_strdup(p + strlen("etcd-prefix="));
|
|
||||||
+ } else if (STRPREFIX(p, "config-path=")) {
|
|
||||||
+ src->configFile = g_strdup(p + strlen("config-path="));
|
|
||||||
+ } else if (STRPREFIX(p, "etcd-host=")) {
|
|
||||||
+ char *h, *sep;
|
|
||||||
+
|
|
||||||
+ h = p + strlen("etcd-host=");
|
|
||||||
+ while (h < e) {
|
|
||||||
+ for (sep = h; sep < e; ++sep) {
|
|
||||||
+ if (*sep == '\\' && (sep[1] == ',' ||
|
|
||||||
+ sep[1] == ';' ||
|
|
||||||
+ sep[1] == ' ')) {
|
|
||||||
+ *sep = '\0';
|
|
||||||
+ sep += 2;
|
|
||||||
+ break;
|
|
||||||
+ }
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ if (virStorageSourceRBDAddHost(src, h) < 0)
|
|
||||||
+ return -1;
|
|
||||||
+
|
|
||||||
+ h = sep;
|
|
||||||
+ }
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ p = next;
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ if (!src->path) {
|
|
||||||
+ return -1;
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ return 0;
|
|
||||||
+}
|
|
||||||
+
|
|
||||||
+
|
|
||||||
static int
|
|
||||||
virStorageSourceParseNBDColonString(const char *nbdstr,
|
|
||||||
virStorageSource *src)
|
|
||||||
@@ -399,6 +468,11 @@ virStorageSourceParseBackingColon(virStorageSource *src,
|
|
||||||
return -1;
|
|
||||||
break;
|
|
||||||
|
|
||||||
+ case VIR_STORAGE_NET_PROTOCOL_VITASTOR:
|
|
||||||
+ if (virStorageSourceParseVitastorColonString(path, src) < 0)
|
|
||||||
+ return -1;
|
|
||||||
+ break;
|
|
||||||
+
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
|
||||||
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
|
||||||
@@ -975,6 +1049,54 @@ virStorageSourceParseBackingJSONRBD(virStorageSource *src,
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Parse a QEMU JSON backing-store specification for the Vitastor protocol
 * into @src. Accepts either the modern form with explicit "image",
 * "config-path", "etcd-prefix" and "server" keys, or the legacy form where
 * everything is packed into a single "filename" colon string.
 *
 * Returns 0 on success, -1 (with an error reported) on failure.
 */
static int
virStorageSourceParseBackingJSONVitastor(virStorageSource *src,
                                         virJSONValue *json,
                                         const char *jsonstr G_GNUC_UNUSED,
                                         int opaque G_GNUC_UNUSED)
{
    const char *filename;
    const char *image = virJSONValueObjectGetString(json, "image");
    const char *conf = virJSONValueObjectGetString(json, "config-path");
    const char *etcd_prefix = virJSONValueObjectGetString(json, "etcd-prefix");
    virJSONValue *servers = virJSONValueObjectGetArray(json, "server");
    size_t nservers;
    size_t i;

    src->type = VIR_STORAGE_TYPE_NETWORK;
    src->protocol = VIR_STORAGE_NET_PROTOCOL_VITASTOR;

    /* legacy syntax passed via 'filename' option */
    if ((filename = virJSONValueObjectGetString(json, "filename")))
        return virStorageSourceParseVitastorColonString(filename, src);

    if (!image) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("missing image name in Vitastor backing volume "
                         "JSON specification"));
        return -1;
    }

    /* g_strdup(NULL) yields NULL, so absent optional keys stay unset */
    src->path = g_strdup(image);
    src->configFile = g_strdup(conf);
    src->query = g_strdup(etcd_prefix);

    if (servers) {
        nservers = virJSONValueArraySize(servers);

        src->hosts = g_new0(virStorageNetHostDef, nservers);
        src->nhosts = nservers;

        for (i = 0; i < nservers; i++) {
            if (virStorageSourceParseBackingJSONInetSocketAddress(src->hosts + i,
                                                                  virJSONValueArrayGet(servers, i)) < 0)
                return -1;
        }
    }

    return 0;
}
|
|
||||||
+
|
|
||||||
static int
|
|
||||||
virStorageSourceParseBackingJSONRaw(virStorageSource *src,
|
|
||||||
virJSONValue *json,
|
|
||||||
@@ -1152,6 +1274,7 @@ static const struct virStorageSourceJSONDriverParser jsonParsers[] = {
|
|
||||||
{"sheepdog", false, virStorageSourceParseBackingJSONSheepdog, 0},
|
|
||||||
{"ssh", false, virStorageSourceParseBackingJSONSSH, 0},
|
|
||||||
{"rbd", false, virStorageSourceParseBackingJSONRBD, 0},
|
|
||||||
+ {"vitastor", false, virStorageSourceParseBackingJSONVitastor, 0},
|
|
||||||
{"raw", true, virStorageSourceParseBackingJSONRaw, 0},
|
|
||||||
{"nfs", false, virStorageSourceParseBackingJSONNFS, 0},
|
|
||||||
{"vxhs", false, virStorageSourceParseBackingJSONVxHS, 0},
|
|
||||||
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
|
|
||||||
index d2d1bc43e3..31a92e4a01 100644
|
|
||||||
--- a/src/test/test_driver.c
|
|
||||||
+++ b/src/test/test_driver.c
|
|
||||||
@@ -7339,6 +7339,7 @@ testStorageVolumeTypeForPool(int pooltype)
|
|
||||||
case VIR_STORAGE_POOL_ISCSI_DIRECT:
|
|
||||||
case VIR_STORAGE_POOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_POOL_RBD:
|
|
||||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
|
||||||
return VIR_STORAGE_VOL_NETWORK;
|
|
||||||
case VIR_STORAGE_POOL_LOGICAL:
|
|
||||||
case VIR_STORAGE_POOL_DISK:
|
|
||||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-fs.xml b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
|
||||||
index eee75af746..8bd0a57bdd 100644
|
|
||||||
--- a/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
|
||||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-fs.xml
|
|
||||||
@@ -204,4 +204,11 @@
|
|
||||||
</enum>
|
|
||||||
</volOptions>
|
|
||||||
</pool>
|
|
||||||
+ <pool type='vitastor' supported='no'>
|
|
||||||
+ <volOptions>
|
|
||||||
+ <defaultFormat type='raw'/>
|
|
||||||
+ <enum name='targetFormatType'>
|
|
||||||
+ </enum>
|
|
||||||
+ </volOptions>
|
|
||||||
+ </pool>
|
|
||||||
</storagepoolCapabilities>
|
|
||||||
diff --git a/tests/storagepoolcapsschemadata/poolcaps-full.xml b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
|
||||||
index 805950a937..852df0de16 100644
|
|
||||||
--- a/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
|
||||||
+++ b/tests/storagepoolcapsschemadata/poolcaps-full.xml
|
|
||||||
@@ -204,4 +204,11 @@
|
|
||||||
</enum>
|
|
||||||
</volOptions>
|
|
||||||
</pool>
|
|
||||||
+ <pool type='vitastor' supported='yes'>
|
|
||||||
+ <volOptions>
|
|
||||||
+ <defaultFormat type='raw'/>
|
|
||||||
+ <enum name='targetFormatType'>
|
|
||||||
+ </enum>
|
|
||||||
+ </volOptions>
|
|
||||||
+ </pool>
|
|
||||||
</storagepoolCapabilities>
|
|
||||||
diff --git a/tests/storagepoolxml2argvtest.c b/tests/storagepoolxml2argvtest.c
|
|
||||||
index e8e40d695e..db55fe5f3a 100644
|
|
||||||
--- a/tests/storagepoolxml2argvtest.c
|
|
||||||
+++ b/tests/storagepoolxml2argvtest.c
|
|
||||||
@@ -65,6 +65,7 @@ testCompareXMLToArgvFiles(bool shouldFail,
|
|
||||||
case VIR_STORAGE_POOL_GLUSTER:
|
|
||||||
case VIR_STORAGE_POOL_ZFS:
|
|
||||||
case VIR_STORAGE_POOL_VSTORAGE:
|
|
||||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
|
||||||
case VIR_STORAGE_POOL_LAST:
|
|
||||||
default:
|
|
||||||
VIR_TEST_DEBUG("pool type '%s' has no xml2argv test", defTypeStr);
|
|
||||||
diff --git a/tools/virsh-pool.c b/tools/virsh-pool.c
|
|
||||||
index f9aad8ded0..64704b4288 100644
|
|
||||||
--- a/tools/virsh-pool.c
|
|
||||||
+++ b/tools/virsh-pool.c
|
|
||||||
@@ -1187,6 +1187,9 @@ cmdPoolList(vshControl *ctl, const vshCmd *cmd G_GNUC_UNUSED)
|
|
||||||
case VIR_STORAGE_POOL_VSTORAGE:
|
|
||||||
flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VSTORAGE;
|
|
||||||
break;
|
|
||||||
+ case VIR_STORAGE_POOL_VITASTOR:
|
|
||||||
+ flags |= VIR_CONNECT_LIST_STORAGE_POOLS_VITASTOR;
|
|
||||||
+ break;
|
|
||||||
case VIR_STORAGE_POOL_LAST:
|
|
||||||
break;
|
|
||||||
}
|
|
|
@ -15,6 +15,106 @@
|
||||||
#include "msgr_rdma.h"
|
#include "msgr_rdma.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#include <sys/poll.h>
|
||||||
|
|
||||||
|
// Create an I/O thread with its own io_uring. The ring's eventfd is polled
// by the main ringloop (see add_to_ringloop) to learn about completions.
// Throws std::runtime_error if the eventfd can't be registered.
msgr_iothread_t::msgr_iothread_t():
    ring(RINGLOOP_DEFAULT_SIZE)
{
    eventfd = ring.register_eventfd();
    if (eventfd < 0)
    {
        throw std::runtime_error(std::string("failed to register eventfd: ") + strerror(-eventfd));
    }
    // Start the worker only after eventfd registration succeeded. If the
    // thread were started in the member initializer list (as it used to be)
    // and register_eventfd() failed, the constructor would throw while a
    // joinable std::thread member is alive -> std::terminate().
    thread = std::thread(&msgr_iothread_t::run, this);
}
|
||||||
|
|
||||||
|
// Shut the worker down and join it. stop() is idempotent, so an explicit
// earlier stop() call is harmless.
msgr_iothread_t::~msgr_iothread_t()
{
    stop();
}
|
||||||
|
|
||||||
|
// Hand one prepared SQE over to this I/O thread (called from the main
// thread). The submission entry is copied and its completion data
// (callback etc.) is moved out of the caller's ring_data_t, so the caller's
// stack copies may go out of scope immediately afterwards.
void msgr_iothread_t::add_sqe(io_uring_sqe & sqe)
{
    std::unique_lock<std::mutex> lk(mu);
    queue.push_back((iothread_sqe_t){ .sqe = sqe, .data = std::move(*(ring_data_t*)sqe.user_data) });
    // Wake the worker only on the empty->non-empty transition; if the queue
    // already had entries the worker is awake (or will re-check before waiting).
    if (queue.size() == 1)
        cond.notify_all();
}
|
||||||
|
|
||||||
|
void msgr_iothread_t::stop()
|
||||||
|
{
|
||||||
|
mu.lock();
|
||||||
|
if (stopped)
|
||||||
|
{
|
||||||
|
mu.unlock();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
stopped = true;
|
||||||
|
if (outer_loop_data)
|
||||||
|
{
|
||||||
|
outer_loop_data->callback = [](ring_data_t*){};
|
||||||
|
}
|
||||||
|
cond.notify_all();
|
||||||
|
close(eventfd);
|
||||||
|
mu.unlock();
|
||||||
|
thread.join();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Arm a POLLIN request on our eventfd in the main thread's ringloop. When the
// I/O thread's ring completes something, the eventfd becomes readable, the
// outer loop fires this callback in the MAIN thread, and ring.loop() then runs
// the completion callbacks there (see the note in run(): only sendmsg/recvmsg
// are offloaded, callbacks stay in the main thread).
// NOTE(review): once attached, this object is pinned to a single outer loop
// (asserted below); passing a different loop later is a programming error.
void msgr_iothread_t::add_to_ringloop(ring_loop_t *outer_loop)
{
    assert(!this->outer_loop || this->outer_loop == outer_loop);
    io_uring_sqe *sqe = outer_loop->get_sqe();
    assert(sqe != NULL);
    this->outer_loop = outer_loop;
    // Remember the outer loop's ring_data so stop() can replace the callback
    // with a no-op if we are destroyed while the poll is still pending.
    this->outer_loop_data = ((ring_data_t*)sqe->user_data);
    my_uring_prep_poll_add(sqe, eventfd, POLLIN);
    outer_loop_data->callback = [this](ring_data_t *data)
    {
        if (data->res < 0)
        {
            throw std::runtime_error(std::string("eventfd poll failed: ") + strerror(-data->res));
        }
        outer_loop_data = NULL;
        if (stopped)
        {
            return;
        }
        // Re-arm the (one-shot) poll BEFORE reaping completions so no wakeup
        // between loop() and the next poll_add is lost.
        add_to_ringloop(this->outer_loop);
        ring.loop();
    };
}
|
||||||
|
|
||||||
|
void msgr_iothread_t::run()
|
||||||
|
{
|
||||||
|
while (true)
|
||||||
|
{
|
||||||
|
{
|
||||||
|
std::unique_lock<std::mutex> lk(mu);
|
||||||
|
while (!stopped && !queue.size())
|
||||||
|
cond.wait(lk);
|
||||||
|
if (stopped)
|
||||||
|
return;
|
||||||
|
int i = 0;
|
||||||
|
for (; i < queue.size(); i++)
|
||||||
|
{
|
||||||
|
io_uring_sqe *sqe = ring.get_sqe();
|
||||||
|
if (!sqe)
|
||||||
|
break;
|
||||||
|
ring_data_t *data = ((ring_data_t*)sqe->user_data);
|
||||||
|
*data = std::move(queue[i].data);
|
||||||
|
*sqe = queue[i].sqe;
|
||||||
|
sqe->user_data = (uint64_t)data;
|
||||||
|
}
|
||||||
|
queue.erase(queue.begin(), queue.begin()+i);
|
||||||
|
}
|
||||||
|
// We only want to offload sendmsg/recvmsg. Callbacks will be called in main thread
|
||||||
|
ring.submit();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void osd_messenger_t::init()
|
void osd_messenger_t::init()
|
||||||
{
|
{
|
||||||
#ifdef WITH_RDMA
|
#ifdef WITH_RDMA
|
||||||
|
@ -43,6 +143,15 @@ void osd_messenger_t::init()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
if (ringloop && iothread_count > 0)
|
||||||
|
{
|
||||||
|
for (int i = 0; i < iothread_count; i++)
|
||||||
|
{
|
||||||
|
auto iot = new msgr_iothread_t();
|
||||||
|
iothreads.push_back(iot);
|
||||||
|
iot->add_to_ringloop(ringloop);
|
||||||
|
}
|
||||||
|
}
|
||||||
keepalive_timer_id = tfd->set_timer(1000, true, [this](int)
|
keepalive_timer_id = tfd->set_timer(1000, true, [this](int)
|
||||||
{
|
{
|
||||||
auto cl_it = clients.begin();
|
auto cl_it = clients.begin();
|
||||||
|
@ -129,6 +238,14 @@ osd_messenger_t::~osd_messenger_t()
|
||||||
{
|
{
|
||||||
stop_client(clients.begin()->first, true, true);
|
stop_client(clients.begin()->first, true, true);
|
||||||
}
|
}
|
||||||
|
if (iothreads.size())
|
||||||
|
{
|
||||||
|
for (auto iot: iothreads)
|
||||||
|
{
|
||||||
|
delete iot;
|
||||||
|
}
|
||||||
|
iothreads.clear();
|
||||||
|
}
|
||||||
#ifdef WITH_RDMA
|
#ifdef WITH_RDMA
|
||||||
if (rdma_context)
|
if (rdma_context)
|
||||||
{
|
{
|
||||||
|
|
|
@ -111,6 +111,44 @@ struct osd_op_stats_t
|
||||||
uint64_t subop_stat_count[OSD_OP_MAX+1] = { 0 };
|
uint64_t subop_stat_count[OSD_OP_MAX+1] = { 0 };
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#include <mutex>
|
||||||
|
#include <condition_variable>
|
||||||
|
#include <thread>
|
||||||
|
|
||||||
|
#ifdef __MOCK__
|
||||||
|
class msgr_iothread_t;
|
||||||
|
#else
|
||||||
|
// One pending submission queued by the main thread for an I/O thread:
// the io_uring SQE itself plus its completion data (callback etc.), moved
// out of the caller's stack-local ring_data_t by add_sqe().
struct iothread_sqe_t
{
    io_uring_sqe sqe;
    ring_data_t data;
};

// A dedicated I/O thread owning a private io_uring. The main thread queues
// sendmsg/recvmsg SQEs via add_sqe(); run() submits them; completions are
// signalled through the ring's eventfd, which add_to_ringloop() keeps polled
// from the main ringloop so completion callbacks execute in the main thread.
class msgr_iothread_t
{
protected:
    // NOTE: member order matters — <ring> must be initialized before
    // <thread> starts run(), which uses it.
    ring_loop_t ring;                    // this thread's private io_uring
    ring_loop_t *outer_loop = NULL;      // main-thread loop polling our eventfd
    ring_data_t *outer_loop_data = NULL; // pending poll request in the outer loop
    int eventfd = -1;                    // completion-notification fd of <ring>
    bool stopped = false;                // shutdown flag, guarded by <mu>
    std::mutex mu;                       // guards <queue> and <stopped>
    std::condition_variable cond;        // signalled when <queue> becomes non-empty
    std::vector<iothread_sqe_t> queue;   // SQEs waiting to be moved into <ring>
    std::thread thread;                  // worker executing run()

    void run();
public:

    msgr_iothread_t();
    ~msgr_iothread_t();

    void add_sqe(io_uring_sqe & sqe);
    void stop();
    void add_to_ringloop(ring_loop_t *outer_loop);
};
|
||||||
|
#endif
|
||||||
|
|
||||||
struct osd_messenger_t
|
struct osd_messenger_t
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
|
@ -123,6 +161,7 @@ protected:
|
||||||
int osd_ping_timeout = 0;
|
int osd_ping_timeout = 0;
|
||||||
int log_level = 0;
|
int log_level = 0;
|
||||||
bool use_sync_send_recv = false;
|
bool use_sync_send_recv = false;
|
||||||
|
int iothread_count = 4;
|
||||||
|
|
||||||
#ifdef WITH_RDMA
|
#ifdef WITH_RDMA
|
||||||
bool use_rdma = true;
|
bool use_rdma = true;
|
||||||
|
@ -134,6 +173,7 @@ protected:
|
||||||
bool rdma_odp = false;
|
bool rdma_odp = false;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
std::vector<msgr_iothread_t*> iothreads;
|
||||||
std::vector<int> read_ready_clients;
|
std::vector<int> read_ready_clients;
|
||||||
std::vector<int> write_ready_clients;
|
std::vector<int> write_ready_clients;
|
||||||
// We don't use ringloop->set_immediate here because we may have no ringloop in client :)
|
// We don't use ringloop->set_immediate here because we may have no ringloop in client :)
|
||||||
|
|
|
@ -30,7 +30,11 @@ void osd_messenger_t::read_requests()
|
||||||
cl->refs++;
|
cl->refs++;
|
||||||
if (ringloop && !use_sync_send_recv)
|
if (ringloop && !use_sync_send_recv)
|
||||||
{
|
{
|
||||||
io_uring_sqe* sqe = ringloop->get_sqe();
|
auto iothread = iothreads.size() ? iothreads[peer_fd % iothreads.size()] : NULL;
|
||||||
|
io_uring_sqe sqe_local;
|
||||||
|
ring_data_t data_local;
|
||||||
|
sqe_local.user_data = (uint64_t)&data_local;
|
||||||
|
io_uring_sqe* sqe = (iothread ? &sqe_local : ringloop->get_sqe());
|
||||||
if (!sqe)
|
if (!sqe)
|
||||||
{
|
{
|
||||||
cl->read_msg.msg_iovlen = 0;
|
cl->read_msg.msg_iovlen = 0;
|
||||||
|
@ -40,6 +44,10 @@ void osd_messenger_t::read_requests()
|
||||||
ring_data_t* data = ((ring_data_t*)sqe->user_data);
|
ring_data_t* data = ((ring_data_t*)sqe->user_data);
|
||||||
data->callback = [this, cl](ring_data_t *data) { handle_read(data->res, cl); };
|
data->callback = [this, cl](ring_data_t *data) { handle_read(data->res, cl); };
|
||||||
my_uring_prep_recvmsg(sqe, peer_fd, &cl->read_msg, 0);
|
my_uring_prep_recvmsg(sqe, peer_fd, &cl->read_msg, 0);
|
||||||
|
if (iothread)
|
||||||
|
{
|
||||||
|
iothread->add_sqe(sqe_local);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
|
@ -189,7 +189,11 @@ bool osd_messenger_t::try_send(osd_client_t *cl)
|
||||||
}
|
}
|
||||||
if (ringloop && !use_sync_send_recv)
|
if (ringloop && !use_sync_send_recv)
|
||||||
{
|
{
|
||||||
io_uring_sqe* sqe = ringloop->get_sqe();
|
auto iothread = iothreads.size() ? iothreads[peer_fd % iothreads.size()] : NULL;
|
||||||
|
io_uring_sqe sqe_local;
|
||||||
|
ring_data_t data_local;
|
||||||
|
sqe_local.user_data = (uint64_t)&data_local;
|
||||||
|
io_uring_sqe* sqe = (iothread ? &sqe_local : ringloop->get_sqe());
|
||||||
if (!sqe)
|
if (!sqe)
|
||||||
{
|
{
|
||||||
return false;
|
return false;
|
||||||
|
@ -200,6 +204,10 @@ bool osd_messenger_t::try_send(osd_client_t *cl)
|
||||||
ring_data_t* data = ((ring_data_t*)sqe->user_data);
|
ring_data_t* data = ((ring_data_t*)sqe->user_data);
|
||||||
data->callback = [this, cl](ring_data_t *data) { handle_send(data->res, cl); };
|
data->callback = [this, cl](ring_data_t *data) { handle_send(data->res, cl); };
|
||||||
my_uring_prep_sendmsg(sqe, peer_fd, &cl->write_msg, 0);
|
my_uring_prep_sendmsg(sqe, peer_fd, &cl->write_msg, 0);
|
||||||
|
if (iothread)
|
||||||
|
{
|
||||||
|
iothread->add_sqe(sqe_local);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
|
@ -79,6 +79,7 @@ void ring_loop_t::loop()
|
||||||
struct io_uring_cqe *cqe;
|
struct io_uring_cqe *cqe;
|
||||||
while (!io_uring_peek_cqe(&ring, &cqe))
|
while (!io_uring_peek_cqe(&ring, &cqe))
|
||||||
{
|
{
|
||||||
|
mu.lock();
|
||||||
struct ring_data_t *d = (struct ring_data_t*)cqe->user_data;
|
struct ring_data_t *d = (struct ring_data_t*)cqe->user_data;
|
||||||
if (d->callback)
|
if (d->callback)
|
||||||
{
|
{
|
||||||
|
@ -90,12 +91,14 @@ void ring_loop_t::loop()
|
||||||
dl.res = cqe->res;
|
dl.res = cqe->res;
|
||||||
dl.callback.swap(d->callback);
|
dl.callback.swap(d->callback);
|
||||||
free_ring_data[free_ring_data_ptr++] = d - ring_datas;
|
free_ring_data[free_ring_data_ptr++] = d - ring_datas;
|
||||||
|
mu.unlock();
|
||||||
dl.callback(&dl);
|
dl.callback(&dl);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
fprintf(stderr, "Warning: empty callback in SQE\n");
|
fprintf(stderr, "Warning: empty callback in SQE\n");
|
||||||
free_ring_data[free_ring_data_ptr++] = d - ring_datas;
|
free_ring_data[free_ring_data_ptr++] = d - ring_datas;
|
||||||
|
mu.unlock();
|
||||||
}
|
}
|
||||||
io_uring_cqe_seen(&ring, cqe);
|
io_uring_cqe_seen(&ring, cqe);
|
||||||
}
|
}
|
||||||
|
|
|
@ -14,6 +14,7 @@
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <functional>
|
#include <functional>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
#include <mutex>
|
||||||
|
|
||||||
#define RINGLOOP_DEFAULT_SIZE 1024
|
#define RINGLOOP_DEFAULT_SIZE 1024
|
||||||
|
|
||||||
|
@ -124,6 +125,7 @@ class ring_loop_t
|
||||||
std::vector<std::function<void()>> immediate_queue, immediate_queue2;
|
std::vector<std::function<void()>> immediate_queue, immediate_queue2;
|
||||||
std::vector<ring_consumer_t*> consumers;
|
std::vector<ring_consumer_t*> consumers;
|
||||||
struct ring_data_t *ring_datas;
|
struct ring_data_t *ring_datas;
|
||||||
|
std::mutex mu;
|
||||||
int *free_ring_data;
|
int *free_ring_data;
|
||||||
unsigned free_ring_data_ptr;
|
unsigned free_ring_data_ptr;
|
||||||
bool loop_again;
|
bool loop_again;
|
||||||
|
@ -138,12 +140,17 @@ public:
|
||||||
|
|
||||||
inline struct io_uring_sqe* get_sqe()
|
inline struct io_uring_sqe* get_sqe()
|
||||||
{
|
{
|
||||||
|
mu.lock();
|
||||||
if (free_ring_data_ptr == 0)
|
if (free_ring_data_ptr == 0)
|
||||||
|
{
|
||||||
|
mu.unlock();
|
||||||
return NULL;
|
return NULL;
|
||||||
|
}
|
||||||
struct io_uring_sqe* sqe = io_uring_get_sqe(&ring);
|
struct io_uring_sqe* sqe = io_uring_get_sqe(&ring);
|
||||||
assert(sqe);
|
assert(sqe);
|
||||||
*sqe = { 0 };
|
*sqe = { 0 };
|
||||||
io_uring_sqe_set_data(sqe, ring_datas + free_ring_data[--free_ring_data_ptr]);
|
io_uring_sqe_set_data(sqe, ring_datas + free_ring_data[--free_ring_data_ptr]);
|
||||||
|
mu.unlock();
|
||||||
return sqe;
|
return sqe;
|
||||||
}
|
}
|
||||||
inline void set_immediate(const std::function<void()> cb)
|
inline void set_immediate(const std::function<void()> cb)
|
||||||
|
|
Loading…
Reference in New Issue