Dataset schema (one row per source file):

| column | type | value / length range |
|---|---|---|
| repo | string, nullable | lengths 2 to 152 |
| file | string | lengths 15 to 239 |
| code | string | lengths 0 to 58.4M |
| file_length | int64 | 0 to 58.4M |
| avg_line_length | float64 | 0 to 1.81M |
| max_line_length | int64 | 0 to 12.7M |
| extension_type | string | 364 distinct values |

Every row in this section has repo = null and extension_type = yaml, so those two fields are omitted from the per-row headers below. Each entry gives the file path, its length statistics, and the file contents.
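The page does not spell out how the three length columns relate to the `code` column. The sketch below is one plausible reading in plain Python (the helper name `row_stats` is illustrative, not part of the dataset); it is checked against the `more-active-recovery.yaml` row from the listing that follows.

```python
# Illustrative sketch only: assumes file_length / avg_line_length /
# max_line_length are derived from the raw text in the "code" column.
def row_stats(code: str) -> dict:
    lines = code.split("\n")
    line_lengths = [len(line) for line in lines]
    return {
        "file_length": len(code),                       # total characters
        "avg_line_length": sum(line_lengths) / len(lines),
        "max_line_length": max(line_lengths, default=0),
    }

# Example row (values taken from the listing below).
row = {
    "repo": None,
    "file": "ceph-main/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml",
    "code": ".qa/overrides/more-active-recovery.yaml",
    "extension_type": "yaml",
}
print(row_stats(row["code"]))
# {'file_length': 39, 'avg_line_length': 39.0, 'max_line_length': 39}
```

This reading also reproduces the statistics of the multi-line rows if the `code` field keeps a trailing newline, but that is an inference from the numbers, not something stated on the page.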
ceph-main/qa/suites/rados/thrash/2-recovery-overrides/default.yaml (empty file)

ceph-main/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml (file_length 39, avg_line_length 39, max_line_length 39)
.qa/overrides/more-active-recovery.yaml

ceph-main/qa/suites/rados/thrash/2-recovery-overrides/more-async-partial-recovery.yaml (file_length 138, avg_line_length 18.857143, max_line_length 55)
overrides:
  ceph:
    conf:
      global:
        osd_async_recovery_min_cost: 1
        osd_object_clean_region_max_num_intervals: 1000

ceph-main/qa/suites/rados/thrash/2-recovery-overrides/more-async-recovery.yaml (file_length 82, avg_line_length 12.833333, max_line_length 38)
overrides:
  ceph:
    conf:
      global:
        osd_async_recovery_min_cost: 1

ceph-main/qa/suites/rados/thrash/2-recovery-overrides/more-partial-recovery.yaml (file_length 99, avg_line_length 15.666667, max_line_length 55)
overrides:
  ceph:
    conf:
      global:
        osd_object_clean_region_max_num_intervals: 1000

ceph-main/qa/suites/rados/thrash/3-scrub-overrides/default.yaml (empty file)

ceph-main/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-2.yaml (file_length 66, avg_line_length 10.166667, max_line_length 25)
overrides:
  ceph:
    conf:
      osd:
        osd max scrubs: 2

ceph-main/qa/suites/rados/thrash/3-scrub-overrides/max-simultaneous-scrubs-3.yaml (file_length 66, avg_line_length 10.166667, max_line_length 25)
overrides:
  ceph:
    conf:
      osd:
        osd max scrubs: 3

ceph-main/qa/suites/rados/thrash/backoff/normal.yaml (empty file)

ceph-main/qa/suites/rados/thrash/backoff/peering.yaml (file_length 77, avg_line_length 12, max_line_length 36)
overrides:
  ceph:
    conf:
      osd:
        osd backoff on peering: true

ceph-main/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml (file_length 115, avg_line_length 15.571429, max_line_length 37)
overrides:
  ceph:
    conf:
      osd:
        osd backoff on peering: true
        osd backoff on degraded: true

ceph-main/qa/suites/rados/thrash/clusters/fixed-2.yaml (file_length 25, avg_line_length 25, max_line_length 25)
.qa/clusters/fixed-2.yaml

ceph-main/qa/suites/rados/thrash/clusters/openstack.yaml (file_length 87, avg_line_length 16.6, max_line_length 40)
openstack:
  - volumes: # attached to each instance
      count: 4
      size: 30 # GB

ceph-main/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml (file_length 137, avg_line_length 16.25, max_line_length 47)
overrides:
  ceph:
    conf:
      osd:
        osd inject bad map crc probability: 0.1
    log-ignorelist:
      - failed to encode map

ceph-main/qa/suites/rados/thrash/crc-failures/default.yaml (empty file)

ceph-main/qa/suites/rados/thrash/d-balancer/crush-compat.yaml (file_length 148, avg_line_length 20.285714, max_line_length 56)
tasks:
- exec:
    mon.a:
      - while ! ceph balancer status ; do sleep 1 ; done
      - ceph balancer mode crush-compat
      - ceph balancer on

ceph-main/qa/suites/rados/thrash/d-balancer/on.yaml (empty file)

ceph-main/qa/suites/rados/thrash/msgr-failures/fastclose.yaml (file_length 208, avg_line_length 19.9, max_line_length 44)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 2500
        ms tcp read timeout: 5
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rados/thrash/msgr-failures/few.yaml (file_length 237, avg_line_length 20.636364, max_line_length 48)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
      osd:
        osd heartbeat use min delay socket: true
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml (file_length 324, avg_line_length 24, max_line_length 44)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 2500
        ms inject delay type: osd
        ms inject delay probability: .005
        ms inject delay max: 1
        ms inject internal delays: .002
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rados/thrash/msgr-failures/osd-dispatch-delay.yaml (file_length 155, avg_line_length 18.5, max_line_length 56)
overrides:
  ceph:
    conf:
      global:
        osd debug inject dispatch delay duration: 0.1
        osd debug inject dispatch delay probability: 0.1

ceph-main/qa/suites/rados/thrash/thrashers/careful.yaml (file_length 705, avg_line_length 25.148148, max_line_length 49)
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
    conf:
      osd:
        osd debug reject backfill probability: .3
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 3
        osd snap trim sleep: 2
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgnum_shrink: 1
    chance_pgpnum_fix: 1
    aggressive_pg_num_changes: false

ceph-main/qa/suites/rados/thrash/thrashers/default.yaml (file_length 763, avg_line_length 25.344828, max_line_length 49)
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
    conf:
      osd:
        osd debug reject backfill probability: .3
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd max backfills: 3
        osd snap trim sleep: 2
        osd delete sleep: 1
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgnum_shrink: 1
    chance_pgpnum_fix: 1
    chance_bluestore_reshard: 1
    bluestore_new_sharding: random

ceph-main/qa/suites/rados/thrash/thrashers/mapgap.yaml (file_length 729, avg_line_length 25.071429, max_line_length 41)
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
      - osd_map_cache_size
    conf:
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
      osd:
        osd map cache size: 1
        osd scrub min interval: 60
        osd scrub max interval: 120
        osd scrub during recovery: false
        osd max backfills: 6
tasks:
- thrashosds:
    timeout: 1800
    chance_pgnum_grow: 0.25
    chance_pgnum_shrink: 0.25
    chance_pgpnum_fix: 0.25
    chance_test_map_discontinuity: 2

ceph-main/qa/suites/rados/thrash/thrashers/morepggrow.yaml (file_length 540, avg_line_length 22.521739, max_line_length 49)
overrides:
  ceph:
    conf:
      osd:
        osd scrub min interval: 60
        osd scrub max interval: 120
        journal throttle high multiple: 2
        journal throttle max multiple: 10
        filestore queue throttle high multiple: 2
        filestore queue throttle max multiple: 10
        osd max backfills: 9
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 3
    chance_pgpnum_fix: 1
openstack:
  - volumes:
      size: 50

ceph-main/qa/suites/rados/thrash/thrashers/none.yaml (empty file)

ceph-main/qa/suites/rados/thrash/thrashers/pggrow.yaml (file_length 629, avg_line_length 24.2, max_line_length 41)
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
    conf:
      osd:
        osd scrub min interval: 60
        osd scrub max interval: 120
        filestore odsync write: true
        osd max backfills: 2
        osd snap trim sleep: .5
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 2
    chance_pgpnum_fix: 1

ceph-main/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml (file_length 324, avg_line_length 22.214286, max_line_length 112)
overrides:
  ceph:
    conf:
      client.0:
        admin socket: /var/run/ceph/ceph-$name.asok
tasks:
- radosbench:
    clients: [client.0]
    time: 150
- admin_socket:
    client.0:
      objecter_requests:
        test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"

ceph-main/qa/suites/rados/thrash/workloads/cache-agent-big.yaml (file_length 1167, avg_line_length 30.567568, max_line_length 89)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
      - sudo ceph osd pool create base 4 4 erasure myprofile
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool set base min_size 2
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 5000
- rados:
    clients: [client.0]
    pools: [base]
    ops: 10000
    objects: 6600
    max_seconds: 1200
    size: 1024
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/cache-agent-small.yaml (file_length 1107, avg_line_length 30.657143, max_line_length 78)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 2
      - sudo ceph osd pool set cache min_write_recency_for_promote 2
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml (file_length 1126, avg_line_length 27.175, max_line_length 78)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache readproxy
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache target_max_objects 250
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    pool_snaps: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

ceph-main/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml (file_length 1309, avg_line_length 28.111111, max_line_length 78)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 0
      - sudo ceph osd pool set cache min_write_recency_for_promote 0
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    pool_snaps: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
openstack:
  - machine:
      ram: 15000 # MB

ceph-main/qa/suites/rados/thrash/workloads/cache-snaps-balanced.yaml (file_length 1197, avg_line_length 28.219512, max_line_length 78)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 2
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    balance_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

ceph-main/qa/suites/rados/thrash/workloads/cache-snaps.yaml (file_length 1173, avg_line_length 28.35, max_line_length 78)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache target_max_objects 250
      - sudo ceph osd pool set cache min_read_recency_for_promote 2
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50

ceph-main/qa/suites/rados/thrash/workloads/cache.yaml (file_length 1119, avg_line_length 29.27027, max_line_length 78)
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create base 4
      - sudo ceph osd pool application enable base rados
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add base cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay base cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 3600
      - sudo ceph osd pool set cache min_read_recency_for_promote 0
      - sudo ceph osd pool set cache min_write_recency_for_promote 0
- rados:
    clients: [client.0]
    pools: [base]
    ops: 4000
    objects: 500
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50
      cache_flush: 50
      cache_try_flush: 50
      cache_evict: 50

ceph-main/qa/suites/rados/thrash/workloads/dedup-io-mixed.yaml (file_length 405, avg_line_length 18.333333, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create low_tier 4
- rados:
    clients: [client.0]
    low_tier_pool: 'low_tier'
    ops: 1500
    objects: 50
    set_chunk: true
    enable_dedup: true
    dedup_chunk_size: '131072'
    dedup_chunk_algo: 'fastcdc'
    op_weights:
      read: 100
      write: 50
      set_chunk: 30
      tier_promote: 10
      tier_flush: 5
      tier_evict: 10

ceph-main/qa/suites/rados/thrash/workloads/dedup-io-snaps.yaml (file_length 468, avg_line_length 18.541667, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create low_tier 4
- rados:
    clients: [client.0]
    low_tier_pool: 'low_tier'
    ops: 1500
    objects: 50
    set_chunk: true
    enable_dedup: true
    dedup_chunk_size: '131072'
    dedup_chunk_algo: 'fastcdc'
    op_weights:
      read: 100
      write: 50
      set_chunk: 30
      tier_promote: 10
      tier_flush: 5
      tier_evict: 10
      snap_create: 10
      snap_remove: 10
      rollback: 10

ceph-main/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml (file_length 314, avg_line_length 15.578947, max_line_length 45)
overrides:
  conf:
    osd:
      osd deep scrub update digest min age: 0
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    pool_snaps: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/rados_api_tests.yaml (file_length 468, avg_line_length 18.541667, max_line_length 38)
overrides:
  ceph:
    log-ignorelist:
      - reached quota
      - \(POOL_APP_NOT_ENABLED\)
      - \(PG_AVAILABILITY\)
    crush_tunables: jewel
    conf:
      client:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
      mon:
        mon warn on pool no app: false
        debug mgrc: 20
      osd:
        osd class load list: "*"
        osd class default list: "*"
tasks:
- workunit:
    clients:
      client.0:
        - rados/test.sh

ceph-main/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml (file_length 918, avg_line_length 17.38, max_line_length 26)
overrides:
  ceph:
    conf:
      client.0:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
tasks:
- full_sequential:
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90
  - radosbench:
      clients: [client.0]
      concurrency: 128
      size: 8192
      time: 90

ceph-main/qa/suites/rados/thrash/workloads/radosbench.yaml (file_length 427, avg_line_length 16.12, max_line_length 26)
overrides:
  ceph:
    conf:
      client.0:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
tasks:
- full_sequential:
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90
  - radosbench:
      clients: [client.0]
      time: 90

ceph-main/qa/suites/rados/thrash/workloads/redirect.yaml (file_length 277, avg_line_length 16.375, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create low_tier 4
- rados:
    clients: [client.0]
    low_tier_pool: 'low_tier'
    ops: 4000
    objects: 500
    set_redirect: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml (file_length 269, avg_line_length 17, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create low_tier 4
- rados:
    clients: [client.0]
    low_tier_pool: 'low_tier'
    ops: 4000
    objects: 500
    set_redirect: true
    op_weights:
      set_redirect: 100
      read: 50
      tier_promote: 30

ceph-main/qa/suites/rados/thrash/workloads/redirect_set_object.yaml (file_length 252, avg_line_length 17.071429, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create low_tier 4
- rados:
    clients: [client.0]
    low_tier_pool: 'low_tier'
    ops: 4000
    objects: 500
    set_redirect: true
    op_weights:
      set_redirect: 100
      copy_from: 100

ceph-main/qa/suites/rados/thrash/workloads/set-chunks-read.yaml (file_length 249, avg_line_length 16.857143, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create low_tier 4
- rados:
    clients: [client.0]
    low_tier_pool: 'low_tier'
    ops: 4000
    objects: 300
    set_chunk: true
    op_weights:
      chunk_read: 100
      tier_promote: 10

ceph-main/qa/suites/rados/thrash/workloads/small-objects-balanced.yaml (file_length 386, avg_line_length 15.826087, max_line_length 25)
overrides:
  ceph:
    crush_tunables: jewel
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    balance_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25

ceph-main/qa/suites/rados/thrash/workloads/small-objects-localized.yaml (file_length 387, avg_line_length 15.869565, max_line_length 25)
overrides:
  ceph:
    crush_tunables: jewel
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    localize_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25

ceph-main/qa/suites/rados/thrash/workloads/small-objects.yaml (file_length 362, avg_line_length 15.5, max_line_length 25)
overrides:
  ceph:
    crush_tunables: jewel
tasks:
- rados:
    clients: [client.0]
    ops: 400000
    max_seconds: 600
    max_in_flight: 64
    objects: 1024
    size: 16384
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
      setattr: 25
      rmattr: 25

ceph-main/qa/suites/rados/thrash/workloads/snaps-few-objects-balanced.yaml (file_length 243, avg_line_length 15.266667, max_line_length 23)
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    balance_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/snaps-few-objects-localized.yaml (file_length 244, avg_line_length 15.333333, max_line_length 24)
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    localize_reads: true
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml (file_length 219, avg_line_length 14.714286, max_line_length 23)
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50

ceph-main/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml (file_length 137, avg_line_length 14.333333, max_line_length 32)
tasks:
- rados:
    clients: [client.0]
    ops: 4000
    objects: 500
    write_fadvise_dontneed: true
    op_weights:
      write: 100

ceph-main/qa/suites/rados/valgrind-leaks/1-start.yaml (file_length 652, avg_line_length 20.064516, max_line_length 69)
openstack:
  - volumes: # attached to each instance
      count: 2
      size: 10 # GB
overrides:
  install:
    ceph:
      debuginfo: true
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(PG_
    conf:
      global:
        osd heartbeat grace: 40
        osd max object name len: 460
        osd max object namespace len: 64
      mon:
        mon osd crush smoke test: false
      osd:
        osd fast shutdown: false
    valgrind:
      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
      osd: [--tool=memcheck]
roles:
- [mon.a, mon.b, mon.c, mgr.x, mgr.y, osd.0, osd.1, osd.2, client.0]
tasks:
- install:
- ceph:

ceph-main/qa/suites/rados/valgrind-leaks/centos_latest.yaml (file_length 40, avg_line_length 40, max_line_length 40)
.qa/distros/supported/centos_latest.yaml

ceph-main/qa/suites/rados/valgrind-leaks/2-inject-leak/mon.yaml (file_length 119, avg_line_length 14, max_line_length 40)
overrides:
  ceph:
    expect_valgrind_errors: true
tasks:
- exec:
    mon.a:
      - ceph tell mon.a leak_some_memory

ceph-main/qa/suites/rados/valgrind-leaks/2-inject-leak/none.yaml (empty file)

ceph-main/qa/suites/rados/valgrind-leaks/2-inject-leak/osd.yaml (file_length 119, avg_line_length 14, max_line_length 40)
overrides:
  ceph:
    expect_valgrind_errors: true
tasks:
- exec:
    mon.a:
      - ceph tell osd.0 leak_some_memory

ceph-main/qa/suites/rados/verify/centos_latest.yaml (file_length 40, avg_line_length 40, max_line_length 40)
.qa/distros/supported/centos_latest.yaml

ceph-main/qa/suites/rados/verify/ceph.yaml (file_length 328, avg_line_length 19.5625, max_line_length 41)
overrides:
  ceph:
    conf:
      mon:
        mon min osdmap epochs: 50
        paxos service trim min: 10
        # prune full osdmaps regularly
        mon osdmap full prune min: 15
        mon osdmap full prune interval: 2
        mon osdmap full prune txsize: 2
      osd:
        debug monc: 20
tasks:
- install:
- ceph:

ceph-main/qa/suites/rados/verify/rados.yaml (file_length 21, avg_line_length 21, max_line_length 21)
.qa/config/rados.yaml

ceph-main/qa/suites/rados/verify/clusters/fixed-2.yaml (file_length 25, avg_line_length 25, max_line_length 25)
.qa/clusters/fixed-2.yaml

ceph-main/qa/suites/rados/verify/clusters/openstack.yaml (file_length 87, avg_line_length 16.6, max_line_length 40)
openstack:
  - volumes: # attached to each instance
      count: 4
      size: 10 # GB

ceph-main/qa/suites/rados/verify/d-thrash/none.yaml (empty file)

ceph-main/qa/suites/rados/verify/d-thrash/default/default.yaml (file_length 227, avg_line_length 18, max_line_length 41)
overrides:
  ceph:
    log-ignorelist:
      - but it is still running
      - objects unfound and apparently lost
tasks:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgnum_shrink: 1
    chance_pgpnum_fix: 1

ceph-main/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml (file_length 32, avg_line_length 32, max_line_length 32)
.qa/tasks/thrashosds-health.yaml

ceph-main/qa/suites/rados/verify/msgr-failures/few.yaml (file_length 177, avg_line_length 18.777778, max_line_length 44)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rados/verify/tasks/mon_recovery.yaml (file_length 235, avg_line_length 17.153846, max_line_length 32)
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(MON_DOWN\)
      - \(OSDMAP_FLAGS\)
      - \(SMALLER_PGP_NUM\)
      - \(POOL_APP_NOT_ENABLED\)
      - \(SLOW OPS\)
      - slow request
tasks:
- mon_recovery:

ceph-main/qa/suites/rados/verify/tasks/rados_api_tests.yaml (file_length 743, avg_line_length 20.257143, max_line_length 38)
overrides:
  ceph:
    log-ignorelist:
      - reached quota
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_FULL\)
      - \(SMALLER_PGP_NUM\)
      - \(SLOW_OPS\)
      - \(CACHE_POOL_NEAR_FULL\)
      - \(POOL_APP_NOT_ENABLED\)
      - \(PG_AVAILABILITY\)
      - \(OBJECT_MISPLACED\)
      - slow request
    conf:
      client:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
        debug monc: 20
      mon:
        mon warn on pool no app: false
      osd:
        osd class load list: "*"
        osd class default list: "*"
        osd client watch timeout: 120
tasks:
- workunit:
    timeout: 6h
    env:
      ALLOW_TIMEOUTS: "1"
    clients:
      client.0:
        - rados/test.sh

ceph-main/qa/suites/rados/verify/tasks/rados_cls_all.yaml (file_length 171, avg_line_length 13.333333, max_line_length 35)
overrides:
  ceph:
    conf:
      osd:
        osd_class_load_list: "*"
        osd_class_default_list: "*"
tasks:
- workunit:
    clients:
      client.0:
        - cls

ceph-main/qa/suites/rados/verify/validater/lockdep.yaml (file_length 65, avg_line_length 10, max_line_length 21)
overrides:
  ceph:
    conf:
      global:
        lockdep: true

ceph-main/qa/suites/rados/verify/validater/valgrind.yaml (file_length 905, avg_line_length 27.3125, max_line_length 150)
# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
os_type: centos
overrides:
  install:
    ceph:
      debuginfo: true
  ceph:
    conf:
      global:
        osd heartbeat grace: 80
      mon:
        mon osd crush smoke test: false
      osd:
        osd fast shutdown: false
        debug bluestore: 1
        debug bluefs: 1
    log-ignorelist:
      - overall HEALTH_
      # valgrind is slow.. we might get PGs stuck peering etc
      - \(PG_
      # mons sometimes are left off of initial quorum due to valgrind slowness. ok to ignore here because we'll still catch an actual crash due to the core
      - \(MON_DOWN\)
      - \(SLOW_OPS\)
      - slow request
    valgrind:
      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
      osd: [--tool=memcheck]
      mds: [--tool=memcheck]
      # https://tracker.ceph.com/issues/38621
      # mgr: [--tool=memcheck]

ceph-main/qa/suites/rbd/basic/base/install.yaml (file_length 26, avg_line_length 5.75, max_line_length 10)
tasks:
- install:
- ceph:

ceph-main/qa/suites/rbd/basic/cachepool/none.yaml (empty file)

ceph-main/qa/suites/rbd/basic/cachepool/small.yaml (file_length 569, avg_line_length 30.666667, max_line_length 59)
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add rbd cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay rbd cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250

ceph-main/qa/suites/rbd/basic/clusters/fixed-1.yaml (file_length 25, avg_line_length 25, max_line_length 25)
.qa/clusters/fixed-1.yaml

ceph-main/qa/suites/rbd/basic/clusters/openstack.yaml (file_length 87, avg_line_length 16.6, max_line_length 40)
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 30 # GB

ceph-main/qa/suites/rbd/basic/msgr-failures/few.yaml (file_length 177, avg_line_length 18.777778, max_line_length 44)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml (file_length 261, avg_line_length 17.714286, max_line_length 33)
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
      - \(POOL_APP_NOT_ENABLED\)
      - is full \(reached quota
      - \(POOL_FULL\)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd.sh

ceph-main/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml (file_length 143, avg_line_length 17, max_line_length 33)
tasks:
- workunit:
    clients:
      client.0:
        - cls/test_cls_rbd.sh
        - cls/test_cls_lock.sh
        - cls/test_cls_journal.sh

ceph-main/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml (file_length 81, avg_line_length 12.666667, max_line_length 32)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_lock_fence.sh

ceph-main/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml (file_length 165, avg_line_length 14.090909, max_line_length 35)
overrides:
  ceph:
    log-ignorelist:
      - \(SLOW_OPS\)
      - slow request
tasks:
- workunit:
    clients:
      client.0:
        - rbd/test_librbd_python.sh

ceph-main/qa/suites/rbd/cli/base/install.yaml (file_length 26, avg_line_length 5.75, max_line_length 10)
tasks:
- install:
- ceph:

ceph-main/qa/suites/rbd/cli/features/defaults.yaml (file_length 76, avg_line_length 11.833333, max_line_length 32)
overrides:
  ceph:
    conf:
      client:
        rbd default features: 61

ceph-main/qa/suites/rbd/cli/features/journaling.yaml (file_length 77, avg_line_length 12, max_line_length 33)
overrides:
  ceph:
    conf:
      client:
        rbd default features: 125

ceph-main/qa/suites/rbd/cli/features/layering.yaml (file_length 75, avg_line_length 11.666667, max_line_length 31)
overrides:
  ceph:
    conf:
      client:
        rbd default features: 1

ceph-main/qa/suites/rbd/cli/msgr-failures/few.yaml (file_length 177, avg_line_length 18.777778, max_line_length 44)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rbd/cli/pool/ec-data-pool.yaml (file_length 951, avg_line_length 33, max_line_length 97)
tasks:
- exec:
    client.0:
      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
      - sudo ceph osd pool set datapool allow_ec_overwrites true
      - rbd pool init datapool
overrides:
  thrashosds:
    bdev_inject_crash: 2
    bdev_inject_crash_probability: .5
  ceph:
    fs: xfs
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
    conf:
      client:
        rbd default data pool: datapool
      osd: # force bluestore since it's required for ec overwrites
        osd objectstore: bluestore
        bluestore block size: 96636764160
        enable experimental unrecoverable data corrupting features: "*"
        osd debug randomize hobject sort order: false
        # this doesn't work with failures bc the log writes are not atomic across the two backends
        # bluestore bluefs env mirror: true

ceph-main/qa/suites/rbd/cli/pool/none.yaml (empty file)

ceph-main/qa/suites/rbd/cli/pool/replicated-data-pool.yaml (file_length 189, avg_line_length 14.833333, max_line_length 44)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create datapool 4
      - rbd pool init datapool
overrides:
  ceph:
    conf:
      client:
        rbd default data pool: datapool

ceph-main/qa/suites/rbd/cli/pool/small-cache-pool.yaml (file_length 569, avg_line_length 30.666667, max_line_length 59)
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add rbd cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay rbd cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250

ceph-main/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml (file_length 77, avg_line_length 12, max_line_length 28)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/cli_generic.sh

ceph-main/qa/suites/rbd/cli/workloads/rbd_cli_groups.yaml (file_length 76, avg_line_length 11.833333, max_line_length 27)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/rbd_groups.sh

ceph-main/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml (file_length 79, avg_line_length 12.333333, max_line_length 30)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/import_export.sh

ceph-main/qa/suites/rbd/cli/workloads/rbd_cli_luks_encryption.yaml (file_length 145, avg_line_length 13.6, max_line_length 32)
overrides:
  install:
    ceph:
      extra_packages: [rbd-nbd]
tasks:
- workunit:
    clients:
      client.0:
        - rbd/luks-encryption.sh

ceph-main/qa/suites/rbd/cli/workloads/rbd_cli_migration.yaml (file_length 79, avg_line_length 12.333333, max_line_length 30)
tasks:
- workunit:
    clients:
      client.0:
        - rbd/cli_migration.sh

ceph-main/qa/suites/rbd/cli_v1/base/install.yaml (file_length 26, avg_line_length 5.75, max_line_length 10)
tasks:
- install:
- ceph:

ceph-main/qa/suites/rbd/cli_v1/features/format-1.yaml (file_length 73, avg_line_length 11.333333, max_line_length 29)
overrides:
  ceph:
    conf:
      client:
        rbd default format: 1

ceph-main/qa/suites/rbd/cli_v1/msgr-failures/few.yaml (file_length 177, avg_line_length 18.777778, max_line_length 44)
overrides:
  ceph:
    conf:
      global:
        ms inject socket failures: 5000
        mon client directed command retry: 5
    log-ignorelist:
      - \(OSD_SLOW_PING_TIME

ceph-main/qa/suites/rbd/cli_v1/pool/none.yaml (empty file)

ceph-main/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml (file_length 569, avg_line_length 30.666667, max_line_length 59)
overrides:
  ceph:
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NEAR_FULL\)
      - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
    client.0:
      - sudo ceph osd pool create cache 4
      - sudo ceph osd tier add rbd cache
      - sudo ceph osd tier cache-mode cache writeback
      - sudo ceph osd tier set-overlay rbd cache
      - sudo ceph osd pool set cache hit_set_type bloom
      - sudo ceph osd pool set cache hit_set_count 8
      - sudo ceph osd pool set cache hit_set_period 60
      - sudo ceph osd pool set cache target_max_objects 250