Ceph teuthology QA suite fragments from qa/suites/rbd/ in the ceph-main tree. Each entry below gives the file path followed by that file's YAML content.
ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-min-compat-client-octopus.yaml:

meta:
- desc: run the rbd_mirror_journal.sh workunit to test the rbd-mirror daemon
tasks:
- workunit:
    clients:
      cluster1.client.mirror: [rbd/rbd_mirror_journal.sh]
    env:
      # override workunit setting of CEPH_ARGS='--cluster'
      CEPH_ARGS: ''
      RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
      RBD_MIRROR_MIN_COMPAT_CLIENT: 'octopus'

ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-none.yaml:

meta:
- desc: run the rbd_mirror_journal.sh workunit to test the rbd-mirror daemon
overrides:
  ceph:
    conf:
      client:
        rbd mirror image policy type: none
tasks:
- workunit:
    clients:
      cluster1.client.mirror: [rbd/rbd_mirror_journal.sh]
    env:
      # override workunit setting of CEPH_ARGS='--cluster'
      CEPH_ARGS: ''
      RBD_MIRROR_INSTANCES: '4'
      RBD_MIRROR_USE_EXISTING_CLUSTER: '1'

ceph-main/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-simple.yaml:

meta:
- desc: run the rbd_mirror_journal.sh workunit to test the rbd-mirror daemon
overrides:
  ceph:
    conf:
      client:
        rbd mirror image policy type: simple
tasks:
- workunit:
    clients:
      cluster1.client.mirror: [rbd/rbd_mirror_journal.sh]
    env:
      # override workunit setting of CEPH_ARGS='--cluster'
      CEPH_ARGS: ''
      RBD_MIRROR_INSTANCES: '4'
      RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
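
These workload files are facets: teuthology-suite picks one file from each facet directory of a suite and deep-merges the fragments into a single job description. A minimal sketch of such a merged job, assuming a hypothetical cluster facet combined with the policy-none workload above:

```yaml
# Illustrative merged job, not a file from the repo: the cluster facet
# contributes roles, the workload facet contributes overrides and tasks.
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]   # assumed cluster facet
overrides:
  ceph:
    conf:
      client:
        rbd mirror image policy type: none   # from the workload facet
tasks:
- workunit:
    clients:
      cluster1.client.mirror: [rbd/rbd_mirror_journal.sh]
    env:
      CEPH_ARGS: ''
      RBD_MIRROR_INSTANCES: '4'
      RBD_MIRROR_USE_EXISTING_CLUSTER: '1'
```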
ceph-main/qa/suites/rbd/nbd/thrashosds-health.yaml:

symlink -> .qa/tasks/thrashosds-health.yaml

ceph-main/qa/suites/rbd/nbd/cluster/fixed-3.yaml:

roles:
- [mon.a, mon.c, osd.0, osd.1, osd.2]
- [mon.b, mgr.x, osd.3, osd.4, osd.5]
- [client.0]

ceph-main/qa/suites/rbd/nbd/cluster/openstack.yaml:

symlink -> ../../thrash/clusters/openstack.yaml

ceph-main/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml:

overrides:
  install:
    ceph:
      extra_packages: [rbd-nbd]
tasks:
- rbd_fsx:
    clients: [client.0]
    ops: 6000
    nbd: True
    holebdy: 512
    punch_holes: true
    readbdy: 512
    truncbdy: 512
    writebdy: 512

ceph-main/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml:

overrides:
  install:
    ceph:
      extra_packages: [rbd-nbd]
tasks:
- workunit:
    clients:
      client.0:
        - rbd/rbd-nbd.sh

ceph-main/qa/suites/rbd/nbd/workloads/rbd_nbd_diff_continuous.yaml:

overrides:
  install:
    ceph:
      extra_packages:
        - rbd-nbd
      extra_system_packages:
        - pv
tasks:
- workunit:
    clients:
      client.0:
        - rbd/diff_continuous.sh
    env:
      RBD_DEVICE_TYPE: "nbd"
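
For orientation: rbd-nbd, installed via extra_packages above, attaches an RBD image to the kernel NBD driver, which is the path `nbd: True` makes rbd_fsx exercise. A hedged sketch of the manual equivalent as an exec task (image name and device path are illustrative; rbd-nbd prints the actual device on map):

```yaml
tasks:
- exec:
    client.0:
      - "rbd create rbd/nbd-demo --size 1024"   # 1 GiB test image (hypothetical name)
      - "sudo rbd-nbd map rbd/nbd-demo"         # prints the attached device, e.g. /dev/nbd0
      - "sudo rbd-nbd unmap /dev/nbd0"          # assumes the device printed above
```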
ceph-main/qa/suites/rbd/pwl-cache/home/4-cache-path.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_path: /home/ubuntu/cephtest/rbd-pwl-cache
        rbd_plugins: pwl_cache
tasks:
- exec:
    client.0:
      - "mkdir -m 777 /home/ubuntu/cephtest/rbd-pwl-cache"
- exec_on_cleanup:
    client.0:
      - "rm -rf /home/ubuntu/cephtest/rbd-pwl-cache"

ceph-main/qa/suites/rbd/pwl-cache/home/2-cluster/fix-2.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1]
- [mon.b, mgr.y, osd.2, osd.3, client.0]

ceph-main/qa/suites/rbd/pwl-cache/home/2-cluster/openstack.yaml:

openstack:
  - volumes: # attached to each instance
      count: 4
      size: 10 # GB

ceph-main/qa/suites/rbd/pwl-cache/home/5-cache-mode/rwl.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_mode: rwl

ceph-main/qa/suites/rbd/pwl-cache/home/5-cache-mode/ssd.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_mode: ssd

ceph-main/qa/suites/rbd/pwl-cache/home/6-cache-size/1G.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_size: 1073741824

ceph-main/qa/suites/rbd/pwl-cache/home/6-cache-size/8G.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_size: 8589934592

ceph-main/qa/suites/rbd/pwl-cache/home/7-workloads/c_api_tests_with_defaults.yaml:

symlink -> ../../../librbd/workloads/c_api_tests_with_defaults.yaml

ceph-main/qa/suites/rbd/pwl-cache/home/7-workloads/fio.yaml:

tasks:
- rbd_fio:
    client.0:
      fio-io-size: 100%
      formats: [2]
      io-engine: rbd
      rw: randwrite
      runtime: 600

ceph-main/qa/suites/rbd/pwl-cache/home/7-workloads/recovery.yaml:

tasks:
- rbd.create_image:
    client.0:
      image_name: testimage
      image_size: 10240
      image_format: 2
- rbd_pwl_cache_recovery:
    client.0:
      image_name: testimage
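
The rbd_persistent_cache_size values are plain byte counts: 1073741824 = 2^30 (1 GiB) and 8589934592 = 8 x 2^30 (8 GiB). A hypothetical facet for a 2 GiB cache would follow the same shape:

```yaml
overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_size: 2147483648   # 2 x 2^30 bytes = 2 GiB (illustrative)
```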
ceph-main/qa/suites/rbd/pwl-cache/tmpfs/4-cache-path.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_path: /home/ubuntu/cephtest/rbd-pwl-cache
        rbd_plugins: pwl_cache
tasks:
- exec:
    client.0:
      - "mkdir /home/ubuntu/cephtest/tmpfs"
      - "mkdir /home/ubuntu/cephtest/rbd-pwl-cache"
      - "sudo mount -t tmpfs -o size=20G tmpfs /home/ubuntu/cephtest/tmpfs"
      - "truncate -s 20G /home/ubuntu/cephtest/tmpfs/loopfile"
      - "mkfs.ext4 /home/ubuntu/cephtest/tmpfs/loopfile"
      - "sudo mount -o loop /home/ubuntu/cephtest/tmpfs/loopfile /home/ubuntu/cephtest/rbd-pwl-cache"
      - "sudo chmod 777 /home/ubuntu/cephtest/rbd-pwl-cache"
- exec_on_cleanup:
    client.0:
      - "sudo umount /home/ubuntu/cephtest/rbd-pwl-cache"
      - "sudo umount /home/ubuntu/cephtest/tmpfs"
      - "rm -rf /home/ubuntu/cephtest/rbd-pwl-cache"
      - "rm -rf /home/ubuntu/cephtest/tmpfs"
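
The fragment above layers an ext4 filesystem on a loop file inside a 20G tmpfs, so the PWL cache path sits on RAM-backed storage with a bounded size. A hedged sketch of an extra verification step (purely illustrative; not part of the suite):

```yaml
tasks:
- exec:
    client.0:
      - "mount | grep /home/ubuntu/cephtest/rbd-pwl-cache"   # loop-mounted ext4
      - "df -h /home/ubuntu/cephtest/tmpfs"                  # the 20G tmpfs beneath it
```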
ceph-main/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/fix-2.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1]
- [mon.b, mgr.y, osd.2, osd.3, client.0]

ceph-main/qa/suites/rbd/pwl-cache/tmpfs/2-cluster/openstack.yaml:

openstack:
  - volumes: # attached to each instance
      count: 4
      size: 10 # GB

ceph-main/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/rwl.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_mode: rwl

ceph-main/qa/suites/rbd/pwl-cache/tmpfs/5-cache-mode/ssd.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_mode: ssd

ceph-main/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/1G.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_size: 1073741824

ceph-main/qa/suites/rbd/pwl-cache/tmpfs/6-cache-size/5G.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd_persistent_cache_size: 5368709120

ceph-main/qa/suites/rbd/pwl-cache/tmpfs/7-workloads/qemu_xfstests.yaml:

tasks:
- qemu:
    client.0:
      test: qa/run_xfstests_qemu.sh
      type: block
      cpus: 4
      memory: 4096
      disks: 3
ceph-main/qa/suites/rbd/qemu/cache/none.yaml:

tasks:
- install:
- ceph:
    conf:
      client:
        rbd cache: false

ceph-main/qa/suites/rbd/qemu/cache/writearound.yaml:

tasks:
- install:
- ceph:
    conf:
      client:
        rbd cache: true
        rbd cache policy: writearound

ceph-main/qa/suites/rbd/qemu/cache/writeback.yaml:

tasks:
- install:
- ceph:
    conf:
      client:
        rbd cache: true
        rbd cache policy: writeback

ceph-main/qa/suites/rbd/qemu/cache/writethrough.yaml:

tasks:
- install:
- ceph:
    conf:
      client:
        rbd cache: true
        rbd cache max dirty: 0
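
writethrough.yaml has no explicit policy line: with `rbd cache max dirty: 0` the cache is never allowed to hold dirty data, so every write completes only after it reaches the cluster, i.e. write-through behavior. A sketch of the equivalent explicit form, assuming the newer `rbd cache policy` option:

```yaml
overrides:
  ceph:
    conf:
      client:
        rbd cache: true
        rbd cache policy: writethrough   # roughly equivalent to 'rbd cache max dirty: 0'
```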
ceph-main/qa/suites/rbd/qemu/clusters/fixed-3.yaml:

symlink -> .qa/clusters/fixed-3.yaml

ceph-main/qa/suites/rbd/qemu/clusters/openstack.yaml:

openstack:
  - machine:
      disk: 40 # GB
      ram: 30000 # MB
      cpus: 1
    volumes: # attached to each instance
      count: 4
      size: 30 # GB
ceph-main/qa/suites/rbd/qemu/features/defaults.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd default features: 61

ceph-main/qa/suites/rbd/qemu/features/journaling.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd default features: 125
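
The `rbd default features` values are bitmasks over librbd image features, so the two facets above decode as follows:

```yaml
# RBD feature bits:
#   layering       = 1
#   striping       = 2
#   exclusive-lock = 4
#   object-map     = 8
#   fast-diff      = 16
#   deep-flatten   = 32
#   journaling     = 64
# defaults.yaml:   1 + 4 + 8 + 16 + 32      = 61
# journaling.yaml: 1 + 4 + 8 + 16 + 32 + 64 = 125
overrides:
  ceph:
    conf:
      client:
        rbd default features: 125   # 61 plus journaling
```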
ceph-main/qa/suites/rbd/qemu/features/readbalance.yaml:

overrides:
  ceph:
    conf:
      client:
        rbd read from replica policy: balance
tasks:
- exec:
    osd.0:
      - ceph osd set-require-min-compat-client octopus
ceph-main/qa/suites/rbd/qemu/msgr-failures/few.yaml:

overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - but it is still running
      - \(OSD_SLOW_PING_TIME
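
`ms inject socket failures: 5000` makes the messenger inject roughly one socket failure per 5000 message operations, exercising reconnect and retry paths; the ignorelist entries mask the transient warnings this produces. A hypothetical facet injecting failures ten times as often would only change the divisor:

```yaml
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 500   # illustrative: ~10x the injection rate above
```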
ceph-main/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
    client.0:
      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
      - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
      - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add rbd cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay rbd cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250
      - rbd pool init rbd

ceph-main/qa/suites/rbd/qemu/pool/ec-data-pool.yaml:

tasks:
- exec:
    client.0:
      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
      - sudo ceph osd pool set datapool allow_ec_overwrites true
      - rbd pool init datapool
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    conf:
      client:
        rbd default data pool: datapool
      osd: # force bluestore since it's required for ec overwrites
        osd objectstore: bluestore
        bluestore block size: 96636764160
        enable experimental unrecoverable data corrupting features: "*"
        osd debug randomize hobject sort order: false
        # this doesn't work with failures bc the log writes are not atomic across the two backends
        # bluestore bluefs env mirror: true
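
RBD on an erasure-coded pool requires allow_ec_overwrites (BlueStore only), which is exactly what ec-data-pool.yaml enables before pointing `rbd default data pool` at it. A hedged follow-up sketch checking the flag and creating an image against the EC data pool (image name illustrative):

```yaml
tasks:
- exec:
    client.0:
      - "sudo ceph osd pool get datapool allow_ec_overwrites"       # expect: allow_ec_overwrites: true
      - "rbd create --size 1024 --data-pool datapool rbd/ec-demo"   # metadata in 'rbd', data in 'datapool'
```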
ceph-main/qa/suites/rbd/qemu/pool/none.yaml:

(empty file)

ceph-main/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml:

tasks:
- exec:
    client.0:
      - sudo ceph osd pool create datapool 4
      - rbd pool init datapool
overrides:
  ceph:
    conf:
      client:
        rbd default data pool: datapool
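
With `rbd default data pool: datapool` set client-side, every image created during the run stores its data objects in datapool without needing --data-pool on each command. A hedged verification sketch (image name hypothetical):

```yaml
tasks:
- exec:
    client.0:
      - "rbd create --size 1024 rbd/datapool-demo"
      - "rbd info rbd/datapool-demo | grep data_pool"   # expect: data_pool: datapool
```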
ceph-main/qa/suites/rbd/qemu/pool/small-cache-pool.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add rbd cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay rbd cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250

ceph-main/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml:

tasks:
- qemu:
    all:
      clone: true
      test: qa/workunits/suites/bonnie.sh
exclude_arch: armv7l

ceph-main/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml:

tasks:
- qemu:
    all:
      clone: true
      test: qa/workunits/suites/fsstress.sh
exclude_arch: armv7l

ceph-main/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml:

tasks:
- qemu:
    all:
      clone: true
      type: block
      disks: 3
      test: qa/run_xfstests_qemu.sh
exclude_arch: armv7l
ceph-main/qa/suites/rbd/singleton-bluestore/openstack.yaml:

openstack:
  - volumes: # attached to each instance
      count: 3
      size: 30 # GB

ceph-main/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
- [mon.b, mgr.y, osd.3, osd.4, osd.5]
- [mon.c, mgr.z, osd.6, osd.7, osd.8]
- [osd.9, osd.10, osd.11]
tasks:
- install:
- ceph:
    log-ignorelist:
      - 'application not enabled'
- workunit:
    timeout: 30m
    clients:
      all: [rbd/issue-20295.sh]

ceph-main/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-bitmap.yaml:

symlink -> .qa/objectstore/bluestore-bitmap.yaml

ceph-main/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml:

symlink -> .qa/objectstore/bluestore-comp-snappy.yaml
ceph-main/qa/suites/rbd/singleton/openstack.yaml:

openstack:
  - volumes: # attached to each instance
      count: 2
      size: 30 # GB

ceph-main/qa/suites/rbd/singleton/all/admin_socket.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- workunit:
    clients:
      all: [rbd/test_admin_socket.sh]

ceph-main/qa/suites/rbd/singleton/all/formatted-output.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- cram:
    clients:
      client.0:
        - src/test/cli-integration/rbd/formatted-output.t

ceph-main/qa/suites/rbd/singleton/all/merge_diff.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- workunit:
    clients:
      all: [rbd/merge_diff.sh]

ceph-main/qa/suites/rbd/singleton/all/mon-command-help.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- cram:
    clients:
      client.0:
        - src/test/cli-integration/rbd/mon-command-help.t

ceph-main/qa/suites/rbd/singleton/all/permissions.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- workunit:
    clients:
      all: [rbd/permissions.sh]
ceph-main/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml:

exclude_arch: armv7l
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
    extra_system_packages:
      rpm:
        - qemu-kvm-block-rbd
      deb:
        - qemu-block-extra
        - qemu-utils
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: false
- workunit:
    clients:
      all: [rbd/qemu-iotests.sh]

ceph-main/qa/suites/rbd/singleton/all/qemu-iotests-writearound.yaml:

exclude_arch: armv7l
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
    extra_system_packages:
      rpm:
        - qemu-kvm-block-rbd
      deb:
        - qemu-block-extra
        - qemu-utils
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: true
        rbd cache policy: writearound
- workunit:
    clients:
      all: [rbd/qemu-iotests.sh]

ceph-main/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml:

exclude_arch: armv7l
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
    extra_system_packages:
      rpm:
        - qemu-kvm-block-rbd
      deb:
        - qemu-block-extra
        - qemu-utils
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: true
        rbd cache policy: writeback
- workunit:
    clients:
      all: [rbd/qemu-iotests.sh]

ceph-main/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml:

exclude_arch: armv7l
roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
    extra_system_packages:
      rpm:
        - qemu-kvm-block-rbd
      deb:
        - qemu-block-extra
        - qemu-utils
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: true
        rbd cache max dirty: 0
- workunit:
    clients:
      all: [rbd/qemu-iotests.sh]
ceph-main/qa/suites/rbd/singleton/all/qos.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- workunit:
    clients:
      all: [rbd/qos.sh]

ceph-main/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    conf:
      client:
        rbd validate pool: false
- workunit:
    clients:
      all:
        - mon/rbd_snaps_ops.sh

ceph-main/qa/suites/rbd/singleton/all/rbd_mirror.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
- workunit:
    clients:
      all: [rbd/test_rbd_mirror.sh]

ceph-main/qa/suites/rbd/singleton/all/rbd_tasks.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
- workunit:
    clients:
      all: [rbd/test_rbd_tasks.sh]

ceph-main/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml:

roles:
- [client.0]
tasks:
- install:
- workunit:
    clients:
      all: [rbd/test_rbdmap_RBDMAPFILE.sh]
ceph-main/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: false
- workunit:
    clients:
      all: [rbd/read-flags.sh]

ceph-main/qa/suites/rbd/singleton/all/read-flags-writeback.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: true
        rbd cache policy: writeback
- workunit:
    clients:
      all: [rbd/read-flags.sh]

ceph-main/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
    conf:
      client:
        rbd cache: true
        rbd cache max dirty: 0
- workunit:
    clients:
      all: [rbd/read-flags.sh]

ceph-main/qa/suites/rbd/singleton/all/snap-diff.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- cram:
    clients:
      client.0:
        - src/test/cli-integration/rbd/snap-diff.t

ceph-main/qa/suites/rbd/singleton/all/verify_pool.yaml:

roles:
- [mon.a, mgr.x, osd.0, osd.1, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- workunit:
    clients:
      all: [rbd/verify_pool.sh]
ceph-main/qa/suites/rbd/thrash/thrashosds-health.yaml:

symlink -> .qa/tasks/thrashosds-health.yaml

ceph-main/qa/suites/rbd/thrash/base/install.yaml:

tasks:
- install:
- ceph:

ceph-main/qa/suites/rbd/thrash/clusters/fixed-2.yaml:

symlink -> .qa/clusters/fixed-2.yaml

ceph-main/qa/suites/rbd/thrash/clusters/openstack.yaml:

openstack:
  - machine:
      disk: 40 # GB
      ram: 8000 # MB
      cpus: 1
    volumes: # attached to each instance
      count: 4
      size: 30 # GB

ceph-main/qa/suites/rbd/thrash/msgr-failures/few.yaml:

overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME
ceph-main/qa/suites/rbd/thrash/thrashers/cache.yaml:

overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
      - overall HEALTH_
      - \(CACHE_POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add rbd cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay rbd cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250
- thrashosds:
    timeout: 1200
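
thrashosds repeatedly marks random OSDs down or out and revives them while the workload runs; timeout bounds (in seconds) how long the thrasher waits for the cluster to settle. A hypothetical variant that also tunes how eagerly OSDs are taken down (option value illustrative):

```yaml
tasks:
- thrashosds:
    timeout: 1200
    chance_down: 0.5   # probability of choosing a 'mark down' action (assumed knob)
```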
ceph-main/qa/suites/rbd/thrash/thrashers/default.yaml:

overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
tasks:
- thrashosds:
    timeout: 1200
ceph-main/qa/suites/rbd/thrash/workloads/journal.yaml:

tasks:
- workunit:
    clients:
      client.0:
        - rbd/journal.sh

ceph-main/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "61"

ceph-main/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml:

tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "61"
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
    conf:
      client:
        rbd clone copy on read: true

ceph-main/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "125"

ceph-main/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "1"
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writearound.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
overrides:
ceph:
conf:
client:
rbd cache: true
rbd cache policy: writearound
| 161
| 13.727273
| 37
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
overrides:
ceph:
conf:
client:
rbd cache: true
rbd cache policy: writeback
| 159
| 13.545455
| 35
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
overrides:
ceph:
conf:
client:
rbd cache: true
rbd cache max dirty: 0
| 154
| 13.090909
| 30
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
overrides:
ceph:
conf:
client:
rbd cache: true
rbd clone copy on read: true
| 160
| 13.636364
| 36
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_deep_copy.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
deep_copy: True
| 76
| 11.833333
| 23
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
journal_replay: True
| 81
| 12.666667
| 24
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
overrides:
ceph:
conf:
client:
rbd cache: false
| 124
| 11.5
| 24
|
yaml
|
null |
ceph-main/qa/suites/rbd/thrash/workloads/rbd_fsx_rate_limit.yaml
|
tasks:
- rbd_fsx:
clients: [client.0]
ops: 6000
overrides:
ceph:
conf:
client:
rbd qos iops limit: 50
rbd qos iops burst: 100
rbd qos schedule tick min: 100
| 201
| 15.833333
| 38
|
yaml
|
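
The qos settings throttle I/O inside librbd: a sustained 50 IOPS with bursts to 100, evaluated on a 100 ms scheduler tick. The same limits can also be applied per pool or per image at runtime; a hedged sketch with the rbd config CLI (pool and image names illustrative):

```yaml
tasks:
- exec:
    client.0:
      - "rbd config pool set rbd rbd_qos_iops_limit 50"
      - "rbd config image set rbd/demo rbd_qos_iops_burst 100"   # 'demo' is a hypothetical image
```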
ceph-main/qa/suites/rbd/valgrind/centos_latest.yaml:

symlink -> .qa/distros/supported/centos_latest.yaml

ceph-main/qa/suites/rbd/valgrind/base/install.yaml:

tasks:
- install:
- ceph:

ceph-main/qa/suites/rbd/valgrind/validator/memcheck.yaml:

# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
os_type: centos
overrides:
  install:
    ceph:
      debuginfo: true
  rbd_fsx:
    valgrind: ["--tool=memcheck"]
  workunit:
    env:
      VALGRIND: "--tool=memcheck --leak-check=full"
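
memcheck.yaml routes valgrind flags two ways: rbd_fsx receives them via its valgrind list, and workunit scripts receive them through the exported VALGRIND environment variable. Outside teuthology the manual equivalent is simply valgrind in front of the test binary; a hedged exec-style sketch (binary name illustrative):

```yaml
tasks:
- exec:
    client.0:
      - "valgrind --tool=memcheck --leak-check=full ceph_test_librbd"   # hypothetical direct run
```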
ceph-main/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "1"

ceph-main/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "61"

ceph-main/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh
    env:
      RBD_FEATURES: "125"

ceph-main/qa/suites/rbd/valgrind/workloads/fsx.yaml:

tasks:
- rbd_fsx:
    clients: [client.0]
    size: 134217728

ceph-main/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml:

tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd_python.sh
    env:
      RBD_FEATURES: "1"

ceph-main/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml:

tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd_python.sh
    env:
      RBD_FEATURES: "61"

ceph-main/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml:

tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd_python.sh
    env:
      RBD_FEATURES: "125"

ceph-main/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml:

overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_rbd_mirror.sh