| column | type | range / values |
|---|---|---|
| repo | string (nullable) | 2-152 chars |
| file | string | 15-239 chars |
| code | string | 0-58.4M chars |
| file_length | int64 | 0-58.4M |
| avg_line_length | float64 | 0-1.81M |
| max_line_length | int64 | 0-12.7M |
| extension_type | string (categorical) | 364 distinct values |
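
The rows below all share the schema above. As a rough illustration of how such rows might be consumed programmatically, here is a minimal sketch using the Hugging Face `datasets` library; the dataset identifier `org/ceph-qa-code` is a placeholder rather than the real name, and streaming is used only to avoid pulling the large `code` column into memory at once.

```python
# Minimal sketch, assuming the rows shown here are published as a
# Hugging Face dataset; "org/ceph-qa-code" is a placeholder identifier.
from datasets import load_dataset

# Stream the split so each row (including its "code" cell) is read lazily.
ds = load_dataset("org/ceph-qa-code", split="train", streaming=True)

# Keep only the YAML rows, mirroring the extension_type column above.
yaml_rows = (row for row in ds if row["extension_type"] == "yaml")

for row in yaml_rows:
    # Each row carries the file path, raw file content, and the size stats.
    print(row["file"], row["file_length"], row["max_line_length"])
    break  # inspect just the first matching row
```
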
repo: null | file: ceph-main/qa/suites/rbd/cli_v1/workloads/rbd_cli_generic.yaml
tasks:
- workunit:
clients:
client.0:
- rbd/cli_generic.sh
file_length: 77 | avg_line_length: 12 | max_line_length: 28 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/cli_v1/workloads/rbd_cli_import_export.yaml
tasks:
- workunit:
clients:
client.0:
- rbd/import_export.sh
file_length: 79 | avg_line_length: 12.333333 | max_line_length: 30 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/cache/none.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: false
file_length: 75 | avg_line_length: 9.857143 | max_line_length: 24 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/cache/writearound.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: true
rbd cache policy: writearound
file_length: 112 | avg_line_length: 13.125 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/cache/writeback.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: true
rbd cache policy: writeback
file_length: 110 | avg_line_length: 12.875 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/cache/writethrough.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: true
rbd cache max dirty: 0
file_length: 105 | avg_line_length: 12.25 | max_line_length: 30 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/clusters/fixed-3.yaml
.qa/clusters/fixed-3.yaml
file_length: 25 | avg_line_length: 25 | max_line_length: 25 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/clusters/openstack.yaml
openstack:
- machine:
disk: 40 # GB
ram: 30000 # MB
cpus: 1
volumes: # attached to each instance
count: 4
size: 30 # GB
file_length: 156 | avg_line_length: 16.444444 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/features/defaults.yaml
overrides:
ceph:
conf:
client:
rbd default features: 61
file_length: 76 | avg_line_length: 11.833333 | max_line_length: 32 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/msgr-failures/few.yaml
overrides:
ceph:
conf:
global:
ms inject socket failures: 5000
mon client directed command retry: 5
log-ignorelist:
- but it is still running
- \(OSD_SLOW_PING_TIME
file_length: 205 | avg_line_length: 19.6 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(CACHE_POOL_NEAR_FULL\)
- \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
- sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
- sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
- sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
- sudo ceph osd pool create cache 4
- sudo ceph osd tier add rbd cache
- sudo ceph osd tier cache-mode cache writeback
- sudo ceph osd tier set-overlay rbd cache
- sudo ceph osd pool set cache hit_set_type bloom
- sudo ceph osd pool set cache hit_set_count 8
- sudo ceph osd pool set cache hit_set_period 60
- sudo ceph osd pool set cache target_max_objects 250
- rbd pool init rbd
file_length: 833 | avg_line_length: 36.909091 | max_line_length: 97 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/pool/ec-data-pool.yaml
tasks:
- exec:
client.0:
- sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
- sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
- sudo ceph osd pool set datapool allow_ec_overwrites true
- rbd pool init datapool
overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
client:
rbd default data pool: datapool
osd: # force bluestore since it's required for ec overwrites
osd objectstore: bluestore
bluestore block size: 96636764160
enable experimental unrecoverable data corrupting features: "*"
osd debug randomize hobject sort order: false
# this doesn't work with failures bc the log writes are not atomic across the two backends
# bluestore bluefs env mirror: true
file_length: 873 | avg_line_length: 33.96 | max_line_length: 97 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/pool/none.yaml
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml
tasks:
- exec:
client.0:
- sudo ceph osd pool create datapool 4
- rbd pool init datapool
overrides:
ceph:
conf:
client:
rbd default data pool: datapool
file_length: 189 | avg_line_length: 14.833333 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/pool/small-cache-pool.yaml
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(CACHE_POOL_NEAR_FULL\)
- \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
- sudo ceph osd pool create cache 4
- sudo ceph osd tier add rbd cache
- sudo ceph osd tier cache-mode cache writeback
- sudo ceph osd tier set-overlay rbd cache
- sudo ceph osd pool set cache hit_set_type bloom
- sudo ceph osd pool set cache hit_set_count 8
- sudo ceph osd pool set cache hit_set_period 60
- sudo ceph osd pool set cache target_max_objects 250
file_length: 569 | avg_line_length: 30.666667 | max_line_length: 59 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_luks1.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
parent_encryption_format: luks1
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 234 | avg_line_length: 15.785714 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_luks1_luks1.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
parent_encryption_format: luks1
encryption_format: luks1
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 265 | avg_line_length: 16.733333 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_luks1_luks2.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
parent_encryption_format: luks1
encryption_format: luks2
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 265 | avg_line_length: 16.733333 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_luks2.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
parent_encryption_format: luks2
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 234 | avg_line_length: 15.785714 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_luks2_luks1.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
parent_encryption_format: luks2
encryption_format: luks1
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 265 | avg_line_length: 16.733333 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_luks2_luks2.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
parent_encryption_format: luks2
encryption_format: luks2
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 265 | avg_line_length: 16.733333 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_none_luks1.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
encryption_format: luks1
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 227 | avg_line_length: 15.285714 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/encryption/workloads/qemu_xfstests_none_luks2.yaml
overrides:
install:
ceph:
extra_packages: [rbd-nbd]
tasks:
- qemu:
all:
clone: true
encryption_format: luks2
type: block
disks: 3
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 227 | avg_line_length: 15.285714 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/immutable-object-cache/clusters/fix-2.yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1]
- [mon.b, mgr.y, osd.2, osd.3, client.0]
file_length: 79 | avg_line_length: 19 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/immutable-object-cache/clusters/openstack.yaml
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
file_length: 93 | avg_line_length: 17.8 | max_line_length: 42 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/immutable-object-cache/pool/ceph_and_immutable_object_cache.yaml
tasks:
- install:
extra_packages: ['ceph-immutable-object-cache']
- ceph:
conf:
client:
rbd parent cache enabled: true
rbd plugins: parent_cache
immutable_object_cache_path: /tmp/ceph-immutable-object-cache
immutable_object_cache_max_size: 10G
- immutable_object_cache:
client.0:
file_length: 330 | avg_line_length: 24.461538 | max_line_length: 69 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/immutable-object-cache/workloads/c_api_tests_with_defaults.yaml
../../librbd/workloads/c_api_tests_with_defaults.yaml
file_length: 53 | avg_line_length: 53 | max_line_length: 53 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/immutable-object-cache/workloads/fio_on_immutable_object_cache.yaml
tasks:
- rbd_fio:
client.0:
thick-provision: true
fio-io-size: 100%
formats: [2]
features: [[layering]]
io-engine: rbd
test-clone-io: 1
rw: randread
runtime: 600
file_length: 214 | avg_line_length: 16.916667 | max_line_length: 28 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/immutable-object-cache/workloads/qemu_on_immutable_object_cache_and_thrash.yaml
tasks:
- qemu:
client.0:
clone: true
test: qa/run_xfstests_qemu.sh
type: block
cpus: 4
memory: 4096
disks: 3
- immutable_object_cache_thrash:
client.0:
file_length: 196 | avg_line_length: 15.416667 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/iscsi/0-single-container-host.yaml
.qa/distros/single-container-host.yaml
file_length: 38 | avg_line_length: 38 | max_line_length: 38 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/iscsi/base/install.yaml
use_shaman: True
tasks:
- cephadm:
- cephadm.shell:
host.a:
- ceph orch status
- ceph orch ps
- ceph orch ls
- ceph orch host ls
- ceph orch device ls
- install:
extra_packages:
- iscsi-initiator-utils
- device-mapper-multipath
file_length: 268 | avg_line_length: 16.933333 | max_line_length: 31 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/iscsi/cluster/fixed-3.yaml
roles:
- - host.a
- mon.a
- mgr.x
- osd.0
- osd.1
- client.0
- ceph.iscsi.iscsi.a
- - mon.b
- osd.2
- osd.3
- osd.4
- client.1
- - mon.c
- osd.5
- osd.6
- osd.7
- client.2
- ceph.iscsi.iscsi.b
file_length: 223 | avg_line_length: 10.2 | max_line_length: 22 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/iscsi/cluster/openstack.yaml
openstack:
- machine:
disk: 40 # GB
ram: 8000 # MB
cpus: 1
volumes: # attached to each instance
count: 4
size: 30 # GB
file_length: 155 | avg_line_length: 16.333333 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/iscsi/workloads/cephadm_iscsi.yaml
tasks:
- ceph_iscsi_client:
clients: [client.1]
- cram:
parallel: False
clients:
client.0:
- src/test/cli-integration/rbd/gwcli_create.t
client.1:
- src/test/cli-integration/rbd/iscsi_client.t
client.2:
- src/test/cli-integration/rbd/gwcli_delete.t
- cram:
parallel: False
clients:
client.0:
- src/test/cli-integration/rbd/rest_api_create.t
client.1:
- src/test/cli-integration/rbd/iscsi_client.t
client.2:
- src/test/cli-integration/rbd/rest_api_delete.t
file_length: 548 | avg_line_length: 23.954545 | max_line_length: 54 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/cache/none.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: false
file_length: 75 | avg_line_length: 9.857143 | max_line_length: 24 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/cache/writearound.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: true
rbd cache policy: writearound
file_length: 112 | avg_line_length: 13.125 | max_line_length: 37 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/cache/writeback.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: true
rbd cache policy: writeback
file_length: 110 | avg_line_length: 12.875 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/cache/writethrough.yaml
tasks:
- install:
- ceph:
conf:
client:
rbd cache: true
rbd cache max dirty: 0
file_length: 105 | avg_line_length: 12.25 | max_line_length: 30 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/clusters/fixed-3.yaml
.qa/clusters/fixed-3.yaml
file_length: 25 | avg_line_length: 25 | max_line_length: 25 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/clusters/openstack.yaml
openstack:
- volumes: # attached to each instance
count: 4
size: 30 # GB
file_length: 87 | avg_line_length: 16.6 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/config/copy-on-read.yaml
overrides:
ceph:
conf:
client:
rbd clone copy on read: true
file_length: 80 | avg_line_length: 12.5 | max_line_length: 36 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/config/none.yaml
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/config/permit-partial-discard.yaml
overrides:
ceph:
conf:
client:
rbd skip partial discard: false
file_length: 83 | avg_line_length: 13 | max_line_length: 39 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/min-compat-client/default.yaml
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/min-compat-client/octopus.yaml
tasks:
- exec:
client.0:
- sudo ceph osd set-require-min-compat-client octopus
file_length: 89 | avg_line_length: 17 | max_line_length: 59 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/msgr-failures/few.yaml
overrides:
ceph:
conf:
global:
ms inject socket failures: 5000
mon client directed command retry: 5
log-ignorelist:
- but it is still running
- \(OSD_SLOW_PING_TIME
file_length: 209 | avg_line_length: 20 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/pool/ec-data-pool.yaml
tasks:
- exec:
client.0:
- sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
- sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
- sudo ceph osd pool set datapool allow_ec_overwrites true
- rbd pool init datapool
overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
client:
rbd default data pool: datapool
osd: # force bluestore since it's required for ec overwrites
osd objectstore: bluestore
bluestore block size: 96636764160
enable experimental unrecoverable data corrupting features: "*"
osd debug randomize hobject sort order: false
# this doesn't work with failures bc the log writes are not atomic across the two backends
# bluestore bluefs env mirror: true
file_length: 873 | avg_line_length: 33.96 | max_line_length: 97 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/pool/none.yaml
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml
tasks:
- exec:
client.0:
- sudo ceph osd pool create datapool 4
- rbd pool init datapool
overrides:
ceph:
conf:
client:
rbd default data pool: datapool
file_length: 189 | avg_line_length: 14.833333 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/pool/small-cache-pool.yaml
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(CACHE_POOL_NEAR_FULL\)
- \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
- sudo ceph osd pool create cache 4
- sudo ceph osd tier add rbd cache
- sudo ceph osd tier cache-mode cache writeback
- sudo ceph osd tier set-overlay rbd cache
- sudo ceph osd pool set cache hit_set_type bloom
- sudo ceph osd pool set cache hit_set_count 8
- sudo ceph osd pool set cache hit_set_period 60
- sudo ceph osd pool set cache target_max_objects 250
file_length: 569 | avg_line_length: 30.666667 | max_line_length: 59 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/c_api_tests.yaml
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(CACHE_POOL_NO_HIT_SET\)
- \(POOL_APP_NOT_ENABLED\)
- is full \(reached quota
- \(POOL_FULL\)
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd.sh
env:
RBD_FEATURES: "1"
file_length: 294 | avg_line_length: 17.4375 | max_line_length: 33 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(CACHE_POOL_NO_HIT_SET\)
- \(POOL_APP_NOT_ENABLED\)
- is full \(reached quota
- \(POOL_FULL\)
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd.sh
env:
RBD_FEATURES: "61"
file_length: 295 | avg_line_length: 17.5 | max_line_length: 33 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(CACHE_POOL_NO_HIT_SET\)
- \(POOL_APP_NOT_ENABLED\)
- is full \(reached quota
- \(POOL_FULL\)
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd.sh
env:
RBD_FEATURES: "125"
file_length: 296 | avg_line_length: 17.5625 | max_line_length: 33 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/fsx.yaml
tasks:
- rbd_fsx:
clients: [client.0]
ops: 20000
file_length: 57 | avg_line_length: 10.6 | max_line_length: 23 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/python_api_tests.yaml
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd_python.sh
env:
RBD_FEATURES: "1"
file_length: 117 | avg_line_length: 13.75 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd_python.sh
env:
RBD_FEATURES: "61"
file_length: 118 | avg_line_length: 13.875 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml
tasks:
- workunit:
clients:
client.0:
- rbd/test_librbd_python.sh
env:
RBD_FEATURES: "125"
file_length: 119 | avg_line_length: 14 | max_line_length: 35 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/librbd/workloads/rbd_fio.yaml
tasks:
- rbd_fio:
client.0:
fio-io-size: 80%
formats: [2]
features: [[layering],[layering,exclusive-lock,object-map]]
io-engine: rbd
test-clone-io: 1
rw: randrw
runtime: 900
file_length: 220 | avg_line_length: 19.090909 | max_line_length: 65 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/base/install.yaml
tasks:
- install:
- ceph:
file_length: 26 | avg_line_length: 5.75 | max_line_length: 10 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/clusters/fixed-3.yaml
.qa/clusters/fixed-3.yaml
file_length: 25 | avg_line_length: 25 | max_line_length: 25 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/clusters/openstack.yaml
../../qemu/clusters/openstack.yaml
file_length: 34 | avg_line_length: 34 | max_line_length: 34 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/qemu/xfstests.yaml
tasks:
- parallel:
- io_workload
- op_workload
io_workload:
sequential:
- qemu:
client.0:
clone: true
type: block
disks: 3
time_wait: 120
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 261 | avg_line_length: 16.466667 | max_line_length: 39 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml
op_workload:
sequential:
- workunit:
clients:
client.0:
- rbd/qemu_dynamic_features.sh
env:
IMAGE_NAME: client.0.1-clone
file_length: 173 | avg_line_length: 18.333333 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml
overrides:
ceph:
conf:
client:
rbd cache: false
op_workload:
sequential:
- workunit:
clients:
client.0:
- rbd/qemu_dynamic_features.sh
env:
IMAGE_NAME: client.0.1-clone
timeout: 0
file_length: 260 | avg_line_length: 16.4 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml
op_workload:
sequential:
- workunit:
clients:
client.0:
- rbd/qemu_rebuild_object_map.sh
env:
IMAGE_NAME: client.0.1-clone
file_length: 175 | avg_line_length: 18.555556 | max_line_length: 42 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/2-clusters/fixed-3.yaml
.qa/clusters/fixed-3.yaml
file_length: 25 | avg_line_length: 25 | max_line_length: 25 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/2-clusters/openstack.yaml
openstack:
- machine:
disk: 40 # GB
ram: 30000 # MB
cpus: 1
volumes: # attached to each instance
count: 4
size: 30 # GB
file_length: 156 | avg_line_length: 16.444444 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/5-pool/ec-data-pool.yaml
tasks:
- exec:
client.0:
- sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
- sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
- sudo ceph osd pool set datapool allow_ec_overwrites true
- rbd pool init datapool
overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
client:
rbd default data pool: datapool
osd: # force bluestore since it's required for ec overwrites
osd objectstore: bluestore
bluestore block size: 96636764160
enable experimental unrecoverable data corrupting features: "*"
osd debug randomize hobject sort order: false
# this doesn't work with failures bc the log writes are not atomic across the two backends
# bluestore bluefs env mirror: true
file_length: 873 | avg_line_length: 33.96 | max_line_length: 97 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/5-pool/none.yaml
file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/5-pool/replicated-data-pool.yaml
tasks:
- exec:
client.0:
- sudo ceph osd pool create datapool 4
- rbd pool init datapool
overrides:
ceph:
conf:
client:
rbd default data pool: datapool
file_length: 189 | avg_line_length: 14.833333 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/6-prepare/qcow2-file.yaml
tasks:
- exec:
client.0:
- mkdir /home/ubuntu/cephtest/migration
- wget -nv -O /home/ubuntu/cephtest/migration/base.client.0.qcow2 http://download.ceph.com/qa/ubuntu-12.04.qcow2
- qemu-img create -f qcow2 /home/ubuntu/cephtest/migration/empty.qcow2 1G
- echo '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/base.client.0.qcow2"}}' | rbd migration prepare --import-only --source-spec-path - client.0.0
- rbd migration prepare --import-only --source-spec '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.qcow2"}}' client.0.1
- rbd migration prepare --import-only --source-spec '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.qcow2"}}' client.0.2
file_length: 822 | avg_line_length: 81.3 | max_line_length: 193 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/6-prepare/qcow2-http.yaml
tasks:
- exec:
client.0:
- mkdir /home/ubuntu/cephtest/migration
- qemu-img create -f qcow2 /home/ubuntu/cephtest/migration/empty.qcow2 1G
- echo '{"type":"qcow","stream":{"type":"http","url":"http://download.ceph.com/qa/ubuntu-12.04.qcow2"}}' | rbd migration prepare --import-only --source-spec-path - client.0.0
- rbd migration prepare --import-only --source-spec '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.qcow2"}}' client.0.1
- rbd migration prepare --import-only --source-spec '{"type":"qcow","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.qcow2"}}' client.0.2
file_length: 690 | avg_line_length: 75.777778 | max_line_length: 182 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/6-prepare/raw-file.yaml
tasks:
- exec:
client.0:
- mkdir /home/ubuntu/cephtest/migration
- wget -nv -O /home/ubuntu/cephtest/migration/base.client.0.qcow2 http://download.ceph.com/qa/ubuntu-12.04.qcow2
- qemu-img convert -f qcow2 -O raw /home/ubuntu/cephtest/migration/base.client.0.qcow2 /home/ubuntu/cephtest/migration/base.client.0.raw
- dd if=/dev/zero of=/home/ubuntu/cephtest/migration/empty.raw count=1 bs=1G
- echo '{"type":"raw","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/base.client.0.raw"}}' | rbd migration prepare --import-only --source-spec-path - client.0.0
- rbd migration prepare --import-only --source-spec '{"type":"raw","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.raw"}}' client.0.1
- rbd migration prepare --import-only --source-spec '{"type":"raw","stream":{"type":"file","file_path":"/home/ubuntu/cephtest/migration/empty.raw"}}' client.0.2
file_length: 961 | avg_line_length: 86.454545 | max_line_length: 190 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/7-io-workloads/qemu_xfstests.yaml
io_workload:
sequential:
- qemu:
client.0:
clone: true
type: block
disks:
- action: none
image_name: client.0.0
- action: none
image_name: client.0.1
- action: none
image_name: client.0.2
test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
file_length: 371 | avg_line_length: 22.25 | max_line_length: 39 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/8-migrate-workloads/execute.yaml
tasks:
- parallel:
- io_workload
- migrate_workload
migrate_workload:
sequential:
- exec:
client.0:
- sleep 60
- rbd migration execute client.0.0
- sleep 60
- rbd migration commit client.0.0
- sleep 60
- rbd migration execute client.0.1
file_length: 325 | avg_line_length: 20.733333 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/migration/9-cleanup/cleanup.yaml
tasks:
- exec:
client.0:
- rm -rf /home/ubuntu/cephtest/migration
file_length: 82 | avg_line_length: 15.6 | max_line_length: 48 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/base/install.yaml
meta:
- desc: run two ceph clusters and install rbd-mirror
tasks:
- install:
extra_packages: [rbd-mirror]
- ceph:
cluster: cluster1
- ceph:
cluster: cluster2
file_length: 170 | avg_line_length: 16.1 | max_line_length: 52 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/clients/mirror.yaml
meta:
- desc: configure the permissions for client.mirror
overrides:
ceph:
conf:
client:
rbd default features: 125
debug rbd: 20
debug rbd_mirror: 15
log to stderr: false
# override to make these names predictable
client.mirror.0:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
client.mirror.1:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
client.mirror.2:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
client.mirror.3:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
tasks:
- exec:
cluster1.client.mirror.0:
- "sudo ceph --cluster cluster1 auth caps client.mirror mon 'profile rbd-mirror-peer' osd 'profile rbd'"
- "sudo ceph --cluster cluster1 auth caps client.mirror.0 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster1 auth caps client.mirror.1 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster1 auth caps client.mirror.2 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster1 auth caps client.mirror.3 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror mon 'profile rbd-mirror-peer' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.0 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.1 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.2 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.3 mon 'profile rbd-mirror' osd 'profile rbd'"
file_length: 2003 | avg_line_length: 53.162162 | max_line_length: 110 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/cluster/2-node.yaml
meta:
- desc: 2 ceph clusters with 1 mon and 3 osds each
roles:
- - cluster1.mon.a
- cluster1.mgr.x
- cluster2.mgr.x
- cluster1.osd.0
- cluster1.osd.1
- cluster1.osd.2
- cluster1.client.0
- cluster2.client.0
- - cluster2.mon.a
- cluster2.osd.0
- cluster2.osd.1
- cluster2.osd.2
- cluster1.client.mirror
- cluster1.client.mirror.0
- cluster1.client.mirror.1
- cluster1.client.mirror.2
- cluster1.client.mirror.3
- cluster1.client.mirror.4
- cluster1.client.mirror.5
- cluster1.client.mirror.6
- cluster2.client.mirror
- cluster2.client.mirror.0
- cluster2.client.mirror.1
- cluster2.client.mirror.2
- cluster2.client.mirror.3
- cluster2.client.mirror.4
- cluster2.client.mirror.5
- cluster2.client.mirror.6
file_length: 758 | avg_line_length: 22.71875 | max_line_length: 50 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/cluster/openstack.yaml
openstack:
- volumes: # attached to each instance
count: 3
size: 30 # GB
file_length: 87 | avg_line_length: 16.6 | max_line_length: 40 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/policy/none.yaml
overrides:
ceph:
conf:
client:
rbd mirror image policy type: none
file_length: 86 | avg_line_length: 13.5 | max_line_length: 42 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/policy/simple.yaml
overrides:
ceph:
conf:
client:
rbd mirror image policy type: simple
file_length: 88 | avg_line_length: 13.833333 | max_line_length: 44 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/rbd-mirror/four-per-cluster.yaml
meta:
- desc: run four rbd-mirror daemons per cluster
tasks:
- rbd-mirror:
client: cluster1.client.mirror.0
thrash: True
- rbd-mirror:
client: cluster1.client.mirror.1
thrash: True
- rbd-mirror:
client: cluster1.client.mirror.2
thrash: True
- rbd-mirror:
client: cluster1.client.mirror.3
thrash: True
- rbd-mirror:
client: cluster2.client.mirror.0
thrash: True
- rbd-mirror:
client: cluster2.client.mirror.1
thrash: True
- rbd-mirror:
client: cluster2.client.mirror.2
thrash: True
- rbd-mirror:
client: cluster2.client.mirror.3
thrash: True
- rbd-mirror-thrash:
cluster: cluster1
- rbd-mirror-thrash:
cluster: cluster2
file_length: 691 | avg_line_length: 20.625 | max_line_length: 47 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-fsx-workunit.yaml
meta:
- desc: run multiple FSX workloads to simulate cluster load and then verify
that the images were replicated
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_fsx_prepare.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_NOCLEANUP: '1'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
- rbd_fsx:
clients:
- cluster1.client.mirror.0
- cluster1.client.mirror.1
- cluster1.client.mirror.2
- cluster1.client.mirror.3
- cluster1.client.mirror.4
- cluster1.client.mirror.5
ops: 6000
keep_images: true
pool_name: mirror
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_fsx_compare.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
timeout: 6h
file_length: 967 | avg_line_length: 27.470588 | max_line_length: 75 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-journal-stress-workunit.yaml
meta:
- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
MIRROR_POOL_MODE: 'pool'
MIRROR_IMAGE_MODE: 'journal'
timeout: 6h
file_length: 453 | avg_line_length: 27.375 | max_line_length: 75 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-journal-workunit.yaml
meta:
- desc: run the rbd_mirror_journal.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_journal.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
file_length: 373 | avg_line_length: 27.769231 | max_line_length: 76 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-exclusive-lock.yaml
meta:
- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
MIRROR_POOL_MODE: 'image'
MIRROR_IMAGE_MODE: 'snapshot'
RBD_IMAGE_FEATURES: 'layering,exclusive-lock'
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
timeout: 6h
file_length: 507 | avg_line_length: 28.882353 | max_line_length: 75 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-fast-diff.yaml
meta:
- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
MIRROR_POOL_MODE: 'image'
MIRROR_IMAGE_MODE: 'snapshot'
RBD_IMAGE_FEATURES: 'layering,exclusive-lock,object-map,fast-diff'
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
timeout: 6h
file_length: 528 | avg_line_length: 30.117647 | max_line_length: 75 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-snapshot-stress-workunit-minimum.yaml
meta:
- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
MIRROR_POOL_MODE: 'image'
MIRROR_IMAGE_MODE: 'snapshot'
RBD_IMAGE_FEATURES: 'layering'
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_USE_RBD_MIRROR: '1'
timeout: 6h
file_length: 492 | avg_line_length: 28 | max_line_length: 75 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/clients/mirror-extra.yaml
meta:
- desc: configure the permissions for client.mirror
overrides:
ceph:
conf:
# override to make these names predictable
client.mirror.4:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
client.mirror.5:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
client.mirror.6:
admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok
pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid
tasks:
- exec:
cluster1.client.mirror.0:
- "sudo ceph --cluster cluster1 auth caps client.mirror.4 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster1 auth caps client.mirror.5 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster1 auth caps client.mirror.6 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.4 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.5 mon 'profile rbd-mirror' osd 'profile rbd'"
- "sudo ceph --cluster cluster2 auth caps client.mirror.6 mon 'profile rbd-mirror' osd 'profile rbd'"
file_length: 1285 | avg_line_length: 50.44 | max_line_length: 107 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/clients/mirror.yaml
../../mirror-thrash/clients/mirror.yaml
file_length: 39 | avg_line_length: 39 | max_line_length: 39 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-ha-workunit.yaml
meta:
- desc: run the rbd_mirror_ha.sh workunit to test the rbd-mirror daemon
overrides:
ceph:
conf:
client:
rbd mirror image policy type: none
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_ha.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
timeout: 6h
file_length: 396 | avg_line_length: 22.352941 | max_line_length: 71 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-journal-bootstrap-workunit.yaml
meta:
- desc: run the rbd_mirror_bootstrap.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_bootstrap.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '1'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
MIRROR_POOL_MODE: 'pool'
MIRROR_IMAGE_MODE: 'journal'
file_length: 406 | avg_line_length: 28.071429 | max_line_length: 78 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-bootstrap-workunit.yaml
meta:
- desc: run the rbd_mirror_bootstrap.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_bootstrap.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '1'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
MIRROR_POOL_MODE: 'image'
MIRROR_IMAGE_MODE: 'snapshot'
file_length: 408 | avg_line_length: 28.214286 | max_line_length: 78 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-workunit-exclusive-lock.yaml
meta:
- desc: run the rbd_mirror_snapshot.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_snapshot.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_CONFIG_KEY: '1'
RBD_IMAGE_FEATURES: 'layering,exclusive-lock'
file_length: 423 | avg_line_length: 29.285714 | max_line_length: 77 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-workunit-fast-diff.yaml
meta:
- desc: run the rbd_mirror_snapshot.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_snapshot.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_CONFIG_KEY: '1'
RBD_IMAGE_FEATURES: 'layering,exclusive-lock,object-map,fast-diff'
file_length: 444 | avg_line_length: 30.785714 | max_line_length: 77 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-workunit-journaling.yaml
meta:
- desc: run the rbd_mirror_snapshot.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_snapshot.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_CONFIG_KEY: '1'
RBD_IMAGE_FEATURES: 'layering,exclusive-lock,journaling'
file_length: 434 | avg_line_length: 30.071429 | max_line_length: 77 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-snapshot-workunit-minimum.yaml
meta:
- desc: run the rbd_mirror_snapshot.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_snapshot.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_CONFIG_KEY: '1'
RBD_IMAGE_FEATURES: 'layering'
file_length: 408 | avg_line_length: 28.214286 | max_line_length: 77 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-stress-workunit-min-compat-client-octopus.yaml
meta:
- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_stress.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_MIN_COMPAT_CLIENT: 'octopus'
file_length: 348 | avg_line_length: 28.083333 | max_line_length: 75 | extension_type: yaml

repo: null | file: ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-config-key.yaml
meta:
- desc: run the rbd_mirror_journal.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
clients:
cluster1.client.mirror: [rbd/rbd_mirror_journal.sh]
env:
# override workunit setting of CEPH_ARGS='--cluster'
CEPH_ARGS: ''
RBD_MIRROR_INSTANCES: '4'
RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
RBD_MIRROR_CONFIG_KEY: '1'
file_length: 369 | avg_line_length: 27.461538 | max_line_length: 76 | extension_type: yaml