Dataset columns:

| column | dtype | notes |
|---|---|---|
| repo | string | lengths 2–152, null (⌀) allowed |
| file | string | lengths 15–239 |
| code | string | lengths 0–58.4M |
| file_length | int64 | 0–58.4M |
| avg_line_length | float64 | 0–1.81M |
| max_line_length | int64 | 0–12.7M |
| extension_type | string | 364 distinct values |

The rows below contain one record per source file.
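If the table needs to be consumed programmatically, the column layout above maps directly onto the Hugging Face `datasets` API. The sketch below is a minimal, hypothetical example: the dataset ID `your-org/ceph-source-files` and the `train` split name are placeholders, not confirmed by the listing.

```python
# Minimal sketch (assumptions: placeholder dataset ID, "train" split).
# Loads the table and keeps only the YAML rows under qa/suites/fs/.
from datasets import load_dataset

ds = load_dataset("your-org/ceph-source-files", split="train")  # hypothetical ID

fs_yaml = ds.filter(
    lambda row: row["extension_type"] == "yaml"
    and row["file"].startswith("ceph-main/qa/suites/fs/")
)

for row in fs_yaml.select(range(3)):
    print(row["file"], row["file_length"], row["max_line_length"])
```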
file: ceph-main/qa/suites/fs/bugs/client_trim_caps/overrides/ignorelist_health.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_health.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/bugs/client_trim_caps/overrides/ignorelist_wrongly_marked_down.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
file_length: 56 | avg_line_length: 56 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/bugs/client_trim_caps/overrides/no_client_pidfile.yaml
repo: null
code: .qa/overrides/no_client_pidfile.yaml
file_length: 36 | avg_line_length: 36 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i24137.yaml
repo: null
code:
# Note this test is unlikely to exercise the code as expected in the future:
# "It's too tricky to arrange inodes in session->caps. we don't know if it
# still works in the future." -Zheng
tasks:
- exec:
mon.a:
- ceph config set mds mds_min_caps_per_client 1
- background_exec:
mon.a:
- "sleep 30 && ceph config set mds mds_max_caps_per_client 1"
- exec:
client.0:
- ceph_test_trim_caps
file_length: 412 | avg_line_length: 26.533333 | max_line_length: 76 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/multivolume/0-start.yaml
repo: null
code:
roles:
- - host.a
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- - host.b
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
conf:
osd:
osd shutdown pgref assert: true
tasks:
- install:
- cephadm:
roleless: true
- cephadm.shell:
host.a:
- ceph orch status
- ceph orch ps
- ceph orch ls
- ceph orch host ls
- ceph orch device ls
- cephadm.shell:
host.a:
- ceph fs volume create foo
- ceph fs volume create bar
- fs.ready:
timeout: 300
file_length: 620 | avg_line_length: 14.525 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/multivolume/1-mount.yaml
repo: null
code:
tasks:
- ceph-fuse:
client.0:
cephfs_name: foo
- ceph-fuse:
client.1:
cephfs_name: bar
file_length: 119 | avg_line_length: 14 | max_line_length: 24 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/multivolume/2-workload/dbench.yaml
repo: null
code: .qa/suites/fs/workload/tasks/5-workunit/suites/dbench.yaml
file_length: 58 | avg_line_length: 58 | max_line_length: 58 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/multivolume/distro/single-container-host.yaml
repo: null
code: .qa/distros/single-container-host.yaml
file_length: 38 | avg_line_length: 38 | max_line_length: 38 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/renamevolume/0-start.yaml
repo: null
code:
roles:
- - host.a
- osd.0
- osd.1
- osd.2
- osd.3
- client.0
- - host.b
- osd.4
- osd.5
- osd.6
- osd.7
- client.1
openstack:
- volumes: # attached to each instance
count: 4
size: 10 # GB
overrides:
ceph:
conf:
osd:
osd shutdown pgref assert: true
tasks:
- install:
- cephadm:
roleless: true
- cephadm.shell:
host.a:
- ceph orch status
- ceph orch ps
- ceph orch ls
- ceph orch host ls
- ceph orch device ls
- cephadm.shell:
host.a:
- ceph fs volume create foo
- fs.ready:
timeout: 300
file_length: 586 | avg_line_length: 14.051282 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
repo: null
code:
tasks:
- cephadm.shell:
host.a:
- ceph fs volume rename foo bar --yes-i-really-mean-it
- fs.ready:
timeout: 300
- cephadm.shell:
host.a:
- |
set -ex
ceph orch ls mds --format=json | jq ".[] | .service_name" | grep "mds.bar"
file_length: 264 | avg_line_length: 21.083333 | max_line_length: 82 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/renamevolume/distro/single-container-host.yaml
repo: null
code: .qa/distros/single-container-host.yaml
file_length: 38 | avg_line_length: 38 | max_line_length: 38 | extension_type: yaml

file: ceph-main/qa/suites/fs/cephadm/renamevolume/overrides/ignorelist_health.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_health.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/bluestore-bitmap.yaml
repo: null
code: .qa/cephfs/objectstore-ec/bluestore-bitmap.yaml
file_length: 47 | avg_line_length: 47 | max_line_length: 47 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/clusters/1-mds-1-client.yaml
repo: null
code: .qa/cephfs/clusters/1-mds-1-client.yaml
file_length: 39 | avg_line_length: 39 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/distro/centos_latest.yaml
repo: null
code: .qa/distros/supported/centos_latest.yaml
file_length: 40 | avg_line_length: 40 | max_line_length: 40 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/distro/ubuntu_latest.yaml
repo: null
code: .qa/distros/supported/ubuntu_latest.yaml
file_length: 40 | avg_line_length: 40 | max_line_length: 40 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/overrides/ignorelist_health.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_health.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/overrides/ignorelist_health_more.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- Reduced data availability
- Degraded data redundancy
file_length: 106 | avg_line_length: 16.833333 | max_line_length: 33 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/overrides/ignorelist_wrongly_marked_down.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
file_length: 56 | avg_line_length: 56 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/overrides/pg-warn.yaml
repo: null
code:
overrides:
ceph:
conf:
global:
mon pg warn min per osd: 0
file_length: 78 | avg_line_length: 12.166667 | max_line_length: 34 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/tasks/0-client.yaml
repo: null
code:
teuthology:
postmerge:
# Once can we make sure the distro kernels have included the fscrypt feature
# or the ceph-fuse have supported the fscrypt feature we can remove this
# restriction.
- if not is_kupstream() then reject() end
task:
file_length: 254 | avg_line_length: 30.875 | max_line_length: 80 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/tasks/1-tests/fscrypt-common.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_fscrypt
file_length: 108 | avg_line_length: 17.166667 | max_line_length: 35 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/tasks/1-tests/fscrypt-dbench.yaml
repo: null
code:
tasks:
- workunit:
timeout: 6h
clients:
client.0:
- fs/fscrypt.sh none dbench
- fs/fscrypt.sh unlocked dbench
file_length: 140 | avg_line_length: 16.625 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/tasks/1-tests/fscrypt-ffsb.yaml
repo: null
code:
tasks:
- workunit:
timeout: 6h
clients:
client.0:
- fs/fscrypt.sh none ffsb
- fs/fscrypt.sh unlocked ffsb
file_length: 136 | avg_line_length: 16.125 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/tasks/1-tests/fscrypt-iozone.yaml
repo: null
code:
tasks:
- workunit:
timeout: 6h
clients:
client.0:
- fs/fscrypt.sh none iozone
- fs/fscrypt.sh unlocked iozone
file_length: 140 | avg_line_length: 16.625 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/fscrypt/tasks/1-tests/fscrypt-pjd.yaml
repo: null
code:
tasks:
- workunit:
timeout: 6h
clients:
client.0:
- fs/fscrypt.sh none pjd
- fs/fscrypt.sh unlocked pjd
file_length: 134 | avg_line_length: 15.875 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/overrides.yaml
repo: null
code:
overrides:
ceph:
conf:
mgr:
debug client: 20
log-ignorelist:
- OSD full dropping all updates
- OSD near full
- pausewr flag
- failsafe engaged, dropping updates
- failsafe disengaged, no longer dropping
- is full \(reached quota
- POOL_FULL
- POOL_NEARFULL
- POOL_BACKFILLFULL
- PG_DEGRADED
- OSD_OUT_OF_ORDER_FULL
- OSD_NEARFULL
- OSD_FULL
file_length: 443 | avg_line_length: 21.2 | max_line_length: 47 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/clusters/1-node-1-mds-1-osd.yaml
repo: null
code: .qa/cephfs/clusters/1-node-1-mds-1-osd.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/mount/fuse.yaml
repo: null
code:
tasks:
- ceph-fuse:
file_length: 22 | avg_line_length: 6.666667 | max_line_length: 14 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/objectstore/bluestore-bitmap.yaml
repo: null
code: .qa/objectstore/bluestore-bitmap.yaml
file_length: 37 | avg_line_length: 37 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/overrides/ignorelist_health.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_health.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/overrides/ignorelist_wrongly_marked_down.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
file_length: 56 | avg_line_length: 56 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/overrides/no_client_pidfile.yaml
repo: null
code: .qa/overrides/no_client_pidfile.yaml
file_length: 36 | avg_line_length: 36 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/full/tasks/mgr-osd-full.yaml
repo: null
code:
overrides:
ceph:
conf:
global:
osd_pool_default_size: 1
osd_pool_default_min_size: 1
client:
debug ms: 1
debug client: 20
mds:
debug ms: 1
debug mds: 20
osd: # force bluestore since it's required for ec overwrites
osd objectstore: bluestore
bluestore block size: 1073741824
tasks:
- workunit:
cleanup: true
clients:
client.0:
- fs/full/subvolume_rm.sh
- workunit:
cleanup: true
clients:
client.0:
- fs/full/subvolume_clone.sh
- workunit:
cleanup: true
clients:
client.0:
- fs/full/subvolume_snapshot_rm.sh
file_length: 666 | avg_line_length: 19.84375 | max_line_length: 66 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/clusters/1a3s-mds-4c-client.yaml
repo: null
code: .qa/cephfs/clusters/1a3s-mds-4c-client.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/objectstore/bluestore-bitmap.yaml
repo: null
code: .qa/objectstore/bluestore-bitmap.yaml
file_length: 37 | avg_line_length: 37 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/objectstore/bluestore-ec-root.yaml
repo: null
code: .qa/cephfs/objectstore-ec/bluestore-ec-root.yaml
file_length: 48 | avg_line_length: 48 | max_line_length: 48 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/overrides/ignorelist_health.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_health.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/overrides/ignorelist_wrongly_marked_down.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
file_length: 56 | avg_line_length: 56 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/overrides/no_client_pidfile.yaml
repo: null
code: .qa/overrides/no_client_pidfile.yaml
file_length: 36 | avg_line_length: 36 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/acls.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- Reduced data availability
- Degraded data redundancy
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_acls
file_length: 211 | avg_line_length: 18.272727 | max_line_length: 33 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/admin.yaml
repo: null
code:
overrides:
ceph:
conf:
global:
lockdep: true
log-ignorelist:
- missing required features
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_admin
file_length: 225 | avg_line_length: 16.384615 | max_line_length: 33 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/alternate-pool.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- bad backtrace
- object missing on disk
- error reading table object
- error reading sessionmap
- unmatched fragstat
- unmatched rstat
- was unreadable, recreating it now
- Scrub error on inode
- Metadata damage detected
- MDS_FAILED
- MDS_DAMAGE
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_recovery_pool
file_length: 442 | avg_line_length: 21.15 | max_line_length: 41 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/asok_dump_tree.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_dump_tree
file_length: 78 | avg_line_length: 14.8 | max_line_length: 35 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/auto-repair.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- force file system read-only
- bad backtrace
- MDS in read-only mode
- \(MDS_READ_ONLY\)
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_auto_repair
file_length: 239 | avg_line_length: 19 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/backtrace.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_backtrace
file_length: 84 | avg_line_length: 16 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/cap-flush.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- Replacing daemon mds.a
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_cap_flush
file_length: 154 | avg_line_length: 16.222222 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/client-limits.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- responding to mclientcaps\(revoke\)
- not advance its oldest_client_tid
- failing to advance its oldest client/flush tid
- Too many inodes in cache
- failing to respond to cache pressure
- slow requests are blocked
- failing to respond to capability release
- MDS cache is too large
- \(MDS_CLIENT_OLDEST_TID\)
- \(MDS_CACHE_OVERSIZED\)
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_client_limits
file_length: 552 | avg_line_length: 28.105263 | max_line_length: 54 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/client-readahead.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_readahead
file_length: 110 | avg_line_length: 17.5 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/client-recovery.yaml
repo: null
code:
# The task interferes with the network, so we need
# to permit OSDs to complain about that.
overrides:
ceph:
log-ignorelist:
- evicting unresponsive client
- but it is still running
- slow request
- MDS_CLIENT_LATE_RELEASE
- t responding to mclientcaps
- file system flag refuse_client_session is set
- Degraded data redundancy
- MDS_CLIENTS_LAGGY
- Reduced data availability
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_client_recovery
file_length: 552 | avg_line_length: 26.65 | max_line_length: 53 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/damage.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- bad backtrace
- object missing on disk
- error reading table object
- error reading sessionmap
- Error loading MDS rank
- missing journal object
- Error recovering journal
- error decoding table object
- failed to read JournalPointer
- Corrupt directory entry
- Corrupt fnode header
- corrupt sessionmap header
- Corrupt dentry
- Scrub error on inode
- Metadata damage detected
- MDS_READ_ONLY
- force file system read-only
- with standby daemon mds
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_damage
file_length: 681 | avg_line_length: 24.259259 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/data-scan.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- bad backtrace
- object missing on disk
- error reading table object
- error reading sessionmap
- unmatched fragstat
- unmatched rstat
- was unreadable, recreating it now
- Scrub error on inode
- Metadata damage detected
- inconsistent rstat on inode
- Error recovering journal
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_data_scan
file_length: 468 | avg_line_length: 23.684211 | max_line_length: 41 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/exports.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- Replacing daemon mds
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_exports
file_length: 168 | avg_line_length: 15.9 | max_line_length: 33 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/forward-scrub.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- inode wrongly marked free
- bad backtrace on inode
- inode table repaired for inode
- Scrub error on inode
- Scrub error on dir
- Metadata damage detected
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_forward_scrub
file_length: 320 | avg_line_length: 21.928571 | max_line_length: 41 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/fragment.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_fragment
file_length: 84 | avg_line_length: 13.166667 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/journal-repair.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- bad backtrace on directory inode
- error reading table object
- Metadata damage detected
- slow requests are blocked
- Behind on trimming
- error reading sessionmap
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_journal_repair
file_length: 331 | avg_line_length: 22.714286 | max_line_length: 42 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/mds-flush.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_flush
file_length: 80 | avg_line_length: 15.2 | max_line_length: 33 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/mds-full.yaml
repo: null
code:
overrides:
ceph:
cephfs:
ec_profile:
- disabled
log-ignorelist:
- OSD full dropping all updates
- OSD near full
- pausewr flag
- failsafe engaged, dropping updates
- failsafe disengaged, no longer dropping
- is full \(reached quota
- POOL_FULL
- POOL_BACKFILLFULL
- PG_RECOVERY_FULL
- PG_DEGRADED
conf:
mon:
mon osd nearfull ratio: 0.6
mon osd backfillfull ratio: 0.6
mon osd full ratio: 0.7
osd:
osd mon report interval: 5
osd objectstore: memstore
osd failsafe full ratio: 1.0
memstore device bytes: 200000000
client.0:
debug client: 20
debug objecter: 20
debug objectcacher: 20
client.1:
debug client: 20
debug objecter: 20
debug objectcacher: 20
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_full
file_length: 945 | avg_line_length: 23.25641 | max_line_length: 47 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/mds_creation_retry.yaml
repo: null
code:
tasks:
-mds_creation_failure:
- workunit:
clients:
all: [fs/misc/trivial_sync.sh]
file_length: 93 | avg_line_length: 12.428571 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/metrics.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_mds_metrics
file_length: 104 | avg_line_length: 16.5 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/multimds_misc.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- Scrub error on inode
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_multimds_misc
file_length: 151 | avg_line_length: 14.2 | max_line_length: 39 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/openfiletable.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_openfiletable
file_length: 88 | avg_line_length: 16.8 | max_line_length: 41 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/pool-perm.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_pool_perm
file_length: 84 | avg_line_length: 16 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/quota.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_quota
file_length: 80 | avg_line_length: 15.2 | max_line_length: 33 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/recovery-fs.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_recovery_fs
file_length: 80 | avg_line_length: 15.2 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/scrub.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- Replacing daemon mds
- Scrub error on inode
- Behind on trimming
- Metadata damage detected
- bad backtrace on inode
- overall HEALTH_
- \(MDS_TRIM\)
conf:
mds:
mds log max segments: 1
mds cache max size: 1000
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_scrub_checks
- tasks.cephfs.test_scrub
file_length: 432 | avg_line_length: 20.65 | max_line_length: 38 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/sessionmap.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- client session with non-allowable root
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_sessionmap
file_length: 197 | avg_line_length: 18.8 | max_line_length: 46 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/snap-schedule.yaml
repo: null
code:
overrides:
ceph:
conf:
mgr:
debug mgr: 20
debug ms: 1
debug finisher: 20
debug client: 20
log-whitelist:
- OSD full dropping all updates
- OSD near full
- pausewr flag
- failsafe engaged, dropping updates
- failsafe disengaged, no longer dropping
- is full \(reached quota
- POOL_FULL
- POOL_BACKFILLFULL
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_snap_schedules
file_length: 491 | avg_line_length: 20.391304 | max_line_length: 47 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
repo: null
code:
overrides:
ceph:
conf:
mgr:
debug mgr: 20
debug ms: 1
debug finisher: 20
debug client: 20
log-whitelist:
- OSD full dropping all updates
- OSD near full
- pausewr flag
- failsafe engaged, dropping updates
- failsafe disengaged, no longer dropping
- is full \(reached quota
- POOL_FULL
- POOL_BACKFILLFULL
overrides:
kclient:
snapdirname: .customsnapkernel
ceph:
conf:
client:
client snapdir: .customsnapfuse
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_snap_schedules.TestSnapSchedulesSnapdir
file_length: 646 | avg_line_length: 19.870968 | max_line_length: 67 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/snapshots.yaml
repo: null
code:
overrides:
check-counter:
dry_run: true
ceph:
log-ignorelist:
- evicting unresponsive client
- RECENT_CRASH
tasks:
- exec:
mon.a:
- ceph config set mgr mgr/crash/warn_recent_interval 0
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.test_snapshots
file_length: 315 | avg_line_length: 17.588235 | max_line_length: 60 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/strays.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_strays
file_length: 81 | avg_line_length: 15.4 | max_line_length: 34 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/test_journal_migration.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_journal_migration
file_length: 86 | avg_line_length: 16.4 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/truncate_delay.yaml
repo: null
code:
overrides:
ceph:
conf:
client:
ms_inject_delay_probability: 1
ms_inject_delay_type: osd
ms_inject_delay_max: 5
client_oc_max_dirty_age: 1
tasks:
- exec:
client.0:
- cd $TESTDIR/mnt.0 && dd if=/dev/zero of=./foo count=100
- sleep 2
- cd $TESTDIR/mnt.0 && truncate --size 0 ./foo
file_length: 344 | avg_line_length: 22 | max_line_length: 63 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/xfstests-dev.yaml
repo: null
code:
tasks:
- cephfs_test_runner:
fail_on_skip: false
modules:
- tasks.cephfs.tests_from_xfstests_dev
file_length: 119 | avg_line_length: 19 | max_line_length: 46 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/workunit/dir-max-entries.yaml
repo: null
code:
tasks:
- workunit:
clients:
client.0:
- fs/maxentries
file_length: 72 | avg_line_length: 11.166667 | max_line_length: 23 | extension_type: yaml

file: ceph-main/qa/suites/fs/functional/tasks/workunit/quota.yaml
repo: null
code:
tasks:
- workunit:
clients:
all:
- fs/quota
file_length: 62 | avg_line_length: 9.5 | max_line_length: 18 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/clusters/1-mds-1-client-coloc.yaml
repo: null
code: .qa/cephfs/clusters/1-mds-1-client-coloc.yaml
file_length: 45 | avg_line_length: 45 | max_line_length: 45 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/objectstore/bluestore-bitmap.yaml
repo: null
code: .qa/objectstore/bluestore-bitmap.yaml
file_length: 37 | avg_line_length: 37 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/overrides/ignorelist_health.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_health.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/overrides/ignorelist_wrongly_marked_down.yaml
repo: null
code: .qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
file_length: 56 | avg_line_length: 56 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/overrides/no_client_pidfile.yaml
repo: null
code: .qa/overrides/no_client_pidfile.yaml
file_length: 36 | avg_line_length: 36 | max_line_length: 36 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/tasks/client.yaml
repo: null
code:
overrides:
ceph:
conf:
client:
debug ms: 1
debug client: 20
mds:
debug ms: 1
debug mds: 20
tasks:
- workunit:
clients:
client.0:
- client/test.sh
file_length: 214 | avg_line_length: 13.333333 | max_line_length: 24 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/tasks/ino_release_cb.yaml
repo: null
code:
overrides:
ceph:
conf:
client:
debug ms: 1
debug client: 20
mds:
debug ms: 1
debug mds: 20
tasks:
- exec:
mon.a:
- ceph config set mds mds_min_caps_per_client 1
- background_exec:
mon.a:
- "sleep 30 && ceph config set mds mds_max_caps_per_client 1"
- exec:
client.0:
- ceph_test_ino_release_cb
file_length: 368 | avg_line_length: 17.45 | max_line_length: 65 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/tasks/libcephfs_python.yaml
repo: null
code:
overrides:
ceph-fuse:
disabled: true
kclient:
disabled: true
tasks:
- workunit:
clients:
client.0:
- fs/test_python.sh
file_length: 149 | avg_line_length: 12.636364 | max_line_length: 27 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/tasks/libcephfs/frag.yaml
repo: null
code: .qa/cephfs/overrides/frag.yaml
file_length: 30 | avg_line_length: 30 | max_line_length: 30 | extension_type: yaml

file: ceph-main/qa/suites/fs/libcephfs/tasks/libcephfs/test.yaml
repo: null
code:
overrides:
ceph:
conf:
client:
debug ms: 1
debug client: 20
mds:
debug ms: 1
debug mds: 20
tasks:
- check-counter:
counters:
mds:
- "mds.dir_split"
- workunit:
clients:
client.0:
- libcephfs/test.sh
file_length: 285 | avg_line_length: 14.052632 | max_line_length: 27 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror-ha/cephfs-mirror/three-per-cluster.yaml
repo: null
code:
meta:
- desc: run one cephfs-mirror daemon on primary cluster
tasks:
- cephfs-mirror:
client: client.mirror1
run_in_foreground: True
- cephfs-mirror:
client: client.mirror2
run_in_foreground: True
- cephfs-mirror:
client: client.mirror3
run_in_foreground: True
file_length: 285 | avg_line_length: 21 | max_line_length: 55 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror-ha/clients/mirror.yaml
repo: null
code:
meta:
- desc: configure the permissions for client.mirror
overrides:
ceph:
conf:
client:
debug cephfs_mirror: 20
log to stderr: false
# make these predictable
client.mirror1:
admin socket: /var/run/ceph/cephfs-mirror1.asok
pid file: /var/run/ceph/cephfs-mirror1.pid
client.mirror2:
admin socket: /var/run/ceph/cephfs-mirror2.asok
pid file: /var/run/ceph/cephfs-mirror2.pid
client.mirror3:
admin socket: /var/run/ceph/cephfs-mirror3.asok
pid file: /var/run/ceph/cephfs-mirror3.pid
tasks:
- exec:
client.mirror1:
- "sudo ceph auth caps client.mirror1 mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'"
client.mirror2:
- "sudo ceph auth caps client.mirror2 mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'"
client.mirror3:
- "sudo ceph auth caps client.mirror3 mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'"
client.mirror_remote:
- "sudo ceph auth caps client.mirror_remote mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'"
client.1:
- "sudo ceph auth caps client.0 mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'"
client.2:
- "sudo ceph auth caps client.1 mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'"
file_length: 1558 | avg_line_length: 46.242424 | max_line_length: 164 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror-ha/cluster/1-node.yaml
repo: null
code:
meta:
- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 5 mdss
roles:
- - mon.a
- mgr.x
- mds.a
- mds.b
- mds.c
- mds.d
- mds.e
- osd.0
- osd.1
- osd.2
- client.0
- client.1
- client.2
- client.mirror1
- client.mirror2
- client.mirror3
- client.mirror_remote
file_length: 290 | avg_line_length: 13.55 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror-ha/objectstore/bluestore-bitmap.yaml
repo: null
code: .qa/objectstore/bluestore-bitmap.yaml
file_length: 37 | avg_line_length: 37 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(FS_DEGRADED\)
- \(MDS_FAILED\)
- \(MDS_DEGRADED\)
- \(FS_WITH_FAILED_MDS\)
- \(MDS_DAMAGE\)
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- \(FS_INLINE_DATA_DEPRECATED\)
- Reduced data availability
- Degraded data redundancy
file_length: 352 | avg_line_length: 22.533333 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror-ha/workloads/cephfs-mirror-ha-workunit.yaml
repo: null
code:
meta:
- desc: run the cephfs_mirror_ha.sh workunit to test cephfs-mirror daemon in HA active/active mode
overrides:
ceph:
conf:
mgr:
debug client: 10
tasks:
- exec:
mon.a:
- "ceph fs volume create dc"
- "ceph fs volume create dc-backup"
# Remove volumes during unwind to avoid MDS replacement warnings:
- full_sequential_finally:
- exec:
mon.a:
- ceph config set mon mon_allow_pool_delete true
- ceph fs volume rm dc --yes-i-really-mean-it
- ceph fs volume rm dc-backup --yes-i-really-mean-it
- ceph-fuse:
client.1:
cephfs_name: dc
client.2:
cephfs_name: dc-backup
- cephfs_mirror_thrash:
randomize: False
max_thrash_delay: 10
- workunit:
subdir: mirror
cleanup: False
clients:
client.1: [fs/cephfs_mirror_ha_gen.sh]
timeout: 1h
- exec:
client.2:
- "echo verifying synchronized snapshots..."
- workunit:
subdir: mirror
cleanup: False
clients:
client.2: [fs/cephfs_mirror_ha_verify.sh]
timeout: 3h
file_length: 1116 | avg_line_length: 24.386364 | max_line_length: 98 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/cephfs-mirror/one-per-cluster.yaml
repo: null
code:
meta:
- desc: run one cephfs-mirror daemon on primary cluster
tasks:
- cephfs-mirror:
client: client.mirror
file_length: 112 | avg_line_length: 17.833333 | max_line_length: 55 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/clients/mirror.yaml
repo: null
code:
meta:
- desc: configure the permissions for client.mirror
overrides:
ceph:
conf:
client:
debug cephfs_mirror: 20
log to stderr: false
# make these predictable
client.mirror:
admin socket: /var/run/ceph/cephfs-mirror.asok
pid file: /var/run/ceph/cephfs-mirror.pid
tasks:
- exec:
client.mirror:
- "sudo ceph auth caps client.mirror mon 'profile cephfs-mirror' mds 'allow r' osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' mgr 'allow r'"
client.mirror_remote:
- "sudo ceph auth caps client.mirror_remote mon 'allow r' mds 'allow rwps' osd 'allow rw tag cephfs *=*' mgr 'allow r'"
file_length: 669 | avg_line_length: 34.263158 | max_line_length: 163 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/cluster/1-node.yaml
repo: null
code:
meta:
- desc: 1 ceph cluster with 1 mon, 1 mgr, 3 osds, 5 mdss
roles:
- - mon.a
- mgr.x
- mds.a
- mds.b
- mds.c
- mds.d
- mds.e
- osd.0
- osd.1
- osd.2
- client.0
- client.1
- client.mirror
- client.mirror_remote
file_length: 238 | avg_line_length: 13.058824 | max_line_length: 56 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/mount/fuse.yaml
repo: null
code:
tasks:
- ceph-fuse: [client.0, client.1]
file_length: 43 | avg_line_length: 13.666667 | max_line_length: 35 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/objectstore/bluestore-bitmap.yaml
repo: null
code: .qa/objectstore/bluestore-bitmap.yaml
file_length: 37 | avg_line_length: 37 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/overrides/whitelist_health.yaml
repo: null
code:
overrides:
ceph:
log-ignorelist:
- overall HEALTH_
- \(FS_DEGRADED\)
- \(MDS_FAILED\)
- \(MDS_DEGRADED\)
- \(FS_WITH_FAILED_MDS\)
- \(MDS_DAMAGE\)
- \(MDS_ALL_DOWN\)
- \(MDS_UP_LESS_THAN_MAX\)
- \(FS_INLINE_DATA_DEPRECATED\)
- Reduced data availability
- Degraded data redundancy
file_length: 352 | avg_line_length: 22.533333 | max_line_length: 37 | extension_type: yaml

file: ceph-main/qa/suites/fs/mirror/tasks/mirror.yaml
repo: null
code:
overrides:
ceph:
conf:
mgr:
debug client: 10
tasks:
- cephfs_test_runner:
modules:
- tasks.cephfs.test_mirroring.TestMirroring
file_length: 164 | avg_line_length: 14 | max_line_length: 51 | extension_type: yaml

file: ceph-main/qa/suites/fs/mixed-clients/clusters/1a3s-mds-2c-client.yaml
repo: null
code: .qa/cephfs/clusters/1a3s-mds-2c-client.yaml
file_length: 43 | avg_line_length: 43 | max_line_length: 43 | extension_type: yaml
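The three numeric columns track simple size statistics of the `code` field. The sketch below shows one way they could be derived; the exact definitions used by the dataset (for example, whether trailing newlines count toward `file_length`, or how line counts are taken for multi-line files) are assumptions, although the single-line records above are reproduced exactly.

```python
# Sketch: derive the numeric columns from a `code` string.
# Assumption: file_length = len(code); avg/max are taken over newline-split lines.
def line_stats(code: str) -> dict:
    lines = code.splitlines() or [""]
    lengths = [len(line) for line in lines]
    return {
        "file_length": len(code),
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
    }

# The first record above is a single 43-character line, so all three values are 43:
print(line_stats(".qa/cephfs/overrides/ignorelist_health.yaml"))
# {'file_length': 43, 'avg_line_length': 43.0, 'max_line_length': 43}
```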