| repo (string, 2–152 chars, nullable) | file (string, 15–239 chars) | code (string, 0–58.4M chars) | file_length (int64, 0–58.4M) | avg_line_length (float64, 0–1.81M) | max_line_length (int64, 0–12.7M) | extension_type (string, 364 classes) |
|---|---|---|---|---|---|---|
null | ceph-main/qa/distros/supported-random-distro$/ubuntu_latest.yaml | ../all/ubuntu_latest.yaml | 25 | 25 | 25 | yaml |
null | ceph-main/qa/distros/supported/centos_latest.yaml | ../all/centos_8.yaml | 20 | 20 | 20 | yaml |
null | ceph-main/qa/distros/supported/rhel_latest.yaml | ../all/rhel_8.yaml | 18 | 18 | 18 | yaml |
null | ceph-main/qa/distros/supported/ubuntu_20.04.yaml | ../all/ubuntu_20.04.yaml | 24 | 24 | 24 | yaml |
null | ceph-main/qa/distros/supported/ubuntu_latest.yaml | ../all/ubuntu_latest.yaml | 25 | 25 | 25 | yaml |
null | ceph-main/qa/erasure-code/ec-feature-plugins-v2.yaml | #
# Test the expected behavior of the
#
# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2
#
# feature.
#
roles:
- - mon.a
- mon.b
- osd.0
- osd.1
- - osd.2
- mon.c
- mgr.x
tasks:
#
# Install firefly
#
- install:
branch: firefly
- ceph:
fs: xfs
#
# We don't need mon.c for now: it will be used later to make su... | 2,094 | 20.161616 | 114 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-default.yaml | tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
op_weights:
read: 100
write: 0
append: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
copy_from: 50
... | 388 | 18.45 | 36 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-parallel.yaml | workload:
parallel:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
op_weights:
read: 100
write: 0
append: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 50
... | 427 | 19.380952 | 42 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml | tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
erasure_code_profile:
name: clay42profile
plugin: clay
k: 4
m: 2
technique: reed_sol_van
crush-failure-domain: osd
op_weights:
read: 100
write: 0
... | 472 | 17.192308 | 31 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml | tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
min_size: 2
write_append_excl: false
erasure_code_profile:
name: isaprofile
plugin: isa
k: 2
m: 1
technique: reed_sol_van
crush-failure-domain: osd
op_weights:
read: 100
... | 484 | 16.962963 | 31 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml | tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
erasure_code_profile:
name: jerasure21profile
plugin: jerasure
k: 2
m: 1
technique: reed_sol_van
crush-failure-domain: osd
op_weights:
read: 100
wr... | 480 | 17.5 | 31 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml | #
# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
# the default value of 4096 It is also not a multiple of 1024*1024 and
# creates situations where rounding rules during recovery becomes
# necessary.
#
tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
wr... | 706 | 21.09375 | 70 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml | tasks:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
erasure_code_profile:
name: jerasure21profile
plugin: jerasure
k: 4
m: 2
technique: reed_sol_van
crush-failure-domain: osd
op_weights:
read: 100
wr... | 480 | 17.5 | 31 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml | tasks:
- rados:
clients: [client.0]
ops: 400
objects: 50
ec_pool: true
write_append_excl: false
erasure_code_profile:
name: lrcprofile
plugin: lrc
k: 4
m: 2
l: 3
crush-failure-domain: osd
op_weights:
read: 100
write: 0
append: 100
d... | 448 | 16.269231 | 31 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml | tasks:
- rados:
clients: [client.0]
ops: 400
objects: 50
ec_pool: true
write_append_excl: false
erasure_code_profile:
name: shecprofile
plugin: shec
k: 4
m: 3
c: 2
crush-failure-domain: osd
op_weights:
read: 100
write: 0
append: 100
... | 450 | 16.346154 | 31 | yaml |
null | ceph-main/qa/erasure-code/ec-rados-sequential.yaml | workload:
sequential:
- rados:
clients: [client.0]
ops: 4000
objects: 50
ec_pool: true
write_append_excl: false
op_weights:
read: 100
write: 0
append: 100
delete: 50
snap_create: 50
snap_remove: 50
rollback: 5... | 431 | 19.571429 | 44 | yaml |
null | ceph-main/qa/libceph/trivial_libceph.c | #define _FILE_OFFSET_BITS 64
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/statvfs.h>
#include "../../src/include/cephfs/libcephfs.h"
#define MB64 (1<<26)
int main(int argc, const char **argv)
{
struct ceph_mount_info *... | 1,709 | 23.428571 | 72 | c |
null | ceph-main/qa/machine_types/schedule_rados_ovh.sh | #!/usr/bin/env bash
# $1 - part
# $2 - branch name
# $3 - machine name
# $4 - email address
# $5 - filter out (this arg is to be at the end of the command line for now)
## example #1
## (date +%U) week number
## % 2 - mod 2 (e.g. 0,1,0,1 ...)
## * 7 - multiplied by 7 (e.g. 0,7,0,7...)
## $1 day of the week (0-6)
##... | 1,217 | 33.8 | 145 | sh |
null | ceph-main/qa/machine_types/schedule_subset.sh | #!/bin/bash -e
#command line => CEPH_BRANCH=<branch>; MACHINE_NAME=<machine_type>; SUITE_NAME=<suite>; ../schedule_subset.sh <day_of_week> $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL <$FILTER>
partitions="$1"
shift
branch="$1"
shift
machine="$1"
shift
suite="$1"
shift
email="$1"
shift
kernel="$1"
sh... | 649 | 29.952381 | 234 | sh |
null | ceph-main/qa/machine_types/vps.yaml | overrides:
ceph:
conf:
global:
osd heartbeat grace: 100
# this line to address issue #1017
mon lease: 15
mon lease ack timeout: 25
s3tests:
idle_timeout: 1200
ceph-fuse:
client.0:
mount_wait: 60
mount_timeout: 120
| 285 | 18.066667 | 43 | yaml |
null | ceph-main/qa/mds/test_anchortable.sh | #!/usr/bin/env bash
set -x
mkdir links
for f in `seq 1 8`
do
mkdir $f
for g in `seq 1 20`
do
touch $f/$g
ln $f/$g links/$f.$g
done
done
for f in `seq 1 8`
do
echo testing failure point $f
bash -c "pushd . ; cd $bindir ; sleep 10; ./ceph -c $conf mds tell \* injectargs \"--mds_kill_mdstable_a... | 506 | 17.107143 | 124 | sh |
null | ceph-main/qa/mds/test_mdstable_failures.sh | #!/usr/bin/env bash
set -x
for f in `seq 1 8`
do
echo testing failure point $f
pushd . ; cd $bindir ; ./ceph -c $conf mds tell \* injectargs "--mds_kill_mdstable_at $f" ; popd
sleep 1 # wait for mds command to go thru
bash -c "pushd . ; cd $bindir ; sleep 10 ; ./init-ceph -c $conf start mds ; popd" &
... | 370 | 23.733333 | 100 | sh |
null | ceph-main/qa/mgr_ttl_cache/disable.yaml | overrides:
ceph:
conf:
mgr:
mgr ttl cache expire seconds: 0
| 80 | 12.5 | 39 | yaml |
null | ceph-main/qa/mgr_ttl_cache/enable.yaml | overrides:
ceph:
conf:
mgr:
mgr ttl cache expire seconds: 5
| 80 | 12.5 | 39 | yaml |
null | ceph-main/qa/mon/bootstrap/host.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[global]
mon host = 127.0.0.1:6789
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c con... | 477 | 15.482759 | 69 | sh |
null | ceph-main/qa/mon/bootstrap/initial_members.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
mon initial members = a,b,d
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring key... | 959 | 23 | 84 | sh |
null | ceph-main/qa/mon/bootstrap/initial_members_asok.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
debug asok = 20
mon initial members = a,b,d
admin socket = $cwd/\$name.asok
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key ... | 1,642 | 23.522388 | 80 | sh |
null | ceph-main/qa/mon/bootstrap/simple.sh | #!/bin/sh -e
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c ... | 863 | 22.351351 | 79 | sh |
null | ceph-main/qa/mon/bootstrap/simple_expand.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.adm... | 1,495 | 23.52459 | 83 | sh |
null | ceph-main/qa/mon/bootstrap/simple_expand_monmap.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789 \
--add b 127.0.0.1:6790 \
--add c 127.0.0.1:6791
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c... | 1,084 | 23.111111 | 73 | sh |
null | ceph-main/qa/mon/bootstrap/simple_single_expand.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
monmaptool --create mm \
--add a 127.0.0.1:6789
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c co... | 1,193 | 20.709091 | 79 | sh |
null | ceph-main/qa/mon/bootstrap/simple_single_expand2.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
ip=`host \`hostname\` | awk '{print $4}'`
monmaptool --create mm \
--add a $ip:6779
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyri... | 882 | 20.536585 | 77 | sh |
null | ceph-main/qa/mon/bootstrap/single_host.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[global]
mon host = 127.0.0.1:6789
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --gen-key -n mon.
ceph-mon -c con... | 482 | 15.655172 | 74 | sh |
null | ceph-main/qa/mon/bootstrap/single_host_multi.sh | #!/bin/sh -ex
cwd=`pwd`
cat > conf <<EOF
[global]
[mon]
admin socket =
log file = $cwd/\$name.log
debug mon = 20
debug ms = 1
mon host = 127.0.0.1:6789 127.0.0.1:6790 127.0.0.1:6791
EOF
rm -f mm
fsid=`uuidgen`
rm -f keyring
ceph-authtool --create-keyring keyring --gen-key -n client.admin
ceph-authtool keyring --ge... | 970 | 23.897436 | 103 | sh |
null | ceph-main/qa/mon_election/classic.yaml | overrides:
ceph:
conf:
global:
mon election default strategy: 1 | 83 | 15.8 | 40 | yaml |
null | ceph-main/qa/mon_election/connectivity.yaml | overrides:
ceph:
conf:
global:
mon election default strategy: 3 | 83 | 15.8 | 40 | yaml |
null | ceph-main/qa/msgr/async-v1only.yaml | overrides:
ceph:
mon_bind_msgr2: false
conf:
global:
ms type: async
ms bind msgr2: false
| 121 | 14.25 | 28 | yaml |
null | ceph-main/qa/msgr/async-v2only.yaml | overrides:
ceph:
conf:
global:
ms type: async
ms bind msgr2: true
ms bind msgr1: false
| 123 | 14.5 | 28 | yaml |
null | ceph-main/qa/msgr/async.yaml | overrides:
ceph:
conf:
global:
ms type: async
| 66 | 10.166667 | 22 | yaml |
null | ceph-main/qa/objectstore/bluestore-bitmap.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore fsck on mou... | 1,339 | 29.454545 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-comp-lz4.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore compression... | 775 | 30.04 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-comp-snappy.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore compression... | 778 | 30.16 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-comp-zlib.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore compression... | 776 | 30.08 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-comp-zstd.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore compression... | 776 | 30.08 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-hybrid.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore fsck on mou... | 1,238 | 29.219512 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-low-osd-mem-target.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
osd memory target: 1610612736 # reduced to 1.5_G
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
... | 807 | 30.076923 | 90 | yaml |
null | ceph-main/qa/objectstore/bluestore-stupid.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 1/20
debug bluefs: 1/20
debug rocksdb: 4/10
bluestore fsck on mou... | 1,339 | 29.454545 | 90 | yaml |
null | ceph-main/qa/objectstore_cephfs/bluestore-bitmap.yaml | ../objectstore/bluestore-bitmap.yaml | 36 | 36 | 36 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-bitmap.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore fsck on mount: tr... | 1,327 | 29.181818 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-comp-lz4.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore compression mode:... | 769 | 29.8 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-comp-snappy.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore compression mode:... | 772 | 29.92 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-comp-zlib.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore compression mode:... | 770 | 29.84 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-comp-zstd.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore compression mode:... | 770 | 29.84 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-hybrid.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore fsck on mount: tr... | 1,226 | 28.926829 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-low-osd-mem-target.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
osd memory target: 1610612736 # reduced to 1.5_G
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
... | 801 | 29.846154 | 90 | yaml |
null | ceph-main/qa/objectstore_debug/bluestore-stupid.yaml | overrides:
thrashosds:
bdev_inject_crash: 2
bdev_inject_crash_probability: .5
ceph:
fs: xfs
conf:
osd:
osd objectstore: bluestore
bluestore block size: 96636764160
debug bluestore: 20
debug bluefs: 20
debug rocksdb: 10
bluestore fsck on mount: tr... | 1,327 | 29.181818 | 90 | yaml |
null | ceph-main/qa/overrides/2-size-1-min-size.yaml | overrides:
ceph:
conf:
global:
osd_pool_default_size: 2
osd_pool_default_min_size: 1
| 113 | 15.285714 | 36 | yaml |
null | ceph-main/qa/overrides/2-size-2-min-size.yaml | overrides:
ceph:
conf:
global:
osd_pool_default_size: 2
osd_pool_default_min_size: 2
log-ignorelist:
- \(REQUEST_STUCK\)
| 159 | 16.777778 | 36 | yaml |
null | ceph-main/qa/overrides/3-size-2-min-size.yaml | overrides:
thrashosds:
min_in: 4
ceph:
conf:
global:
osd_pool_default_size: 3
osd_pool_default_min_size: 2
| 141 | 14.777778 | 36 | yaml |
null | ceph-main/qa/overrides/ignorelist_wrongly_marked_down.yaml | overrides:
ceph:
log-ignorelist:
- but it is still running
conf:
mds:
debug mds: 20
debug ms: 1
client:
debug client: 10 | 170 | 16.1 | 29 | yaml |
null | ceph-main/qa/overrides/more-active-recovery.yaml | overrides:
ceph:
conf:
global:
osd_recovery_max_active: 10
osd_recovery_max_single_start: 10
| 121 | 16.428571 | 41 | yaml |
null | ceph-main/qa/overrides/no_client_pidfile.yaml | overrides:
ceph:
conf:
client:
pid file: ""
| 64 | 9.833333 | 20 | yaml |
null | ceph-main/qa/overrides/nvme_loop.yaml | tasks:
- nvme_loop:
| 20 | 6 | 12 | yaml |
null | ceph-main/qa/overrides/short_pg_log.yaml | overrides:
ceph:
conf:
global:
osd_min_pg_log_entries: 1
osd_max_pg_log_entries: 2
osd_pg_log_trim_min: 0
| 142 | 16.875 | 33 | yaml |
null | ceph-main/qa/packages/packages.yaml | ---
ceph:
deb:
- ceph
- cephadm
- ceph-mds
- ceph-mgr
- ceph-common
- ceph-fuse
- ceph-test
- ceph-volume
- radosgw
- python3-rados
- python3-rgw
- python3-cephfs
- python3-rbd
- libcephfs2
- libcephfs-dev
- librados2
- librbd1
- rbd-fuse
- ceph-common-dbg
- ceph-fuse-dbg
- c... | 1,558 | 17.127907 | 41 | yaml |
null | ceph-main/qa/qa_scripts/cephscrub.sh | # remove the ceph directories
sudo rm -rf /var/log/ceph
sudo rm -rf /var/lib/ceph
sudo rm -rf /etc/ceph
sudo rm -rf /var/run/ceph
# remove the ceph packages
sudo apt-get -y purge ceph
sudo apt-get -y purge ceph-dbg
sudo apt-get -y purge ceph-mds
sudo apt-get -y purge ceph-mds-dbg
sudo apt-get -y purge ceph-fuse
su... | 991 | 31 | 43 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install.sh | #!/usr/bin/env bash
#
# Install a simple ceph cluster upon which openstack images will be stored.
#
set -fv
ceph_node=${1}
source copy_func.sh
copy_file files/$OS_CEPH_ISO $ceph_node .
copy_file execs/ceph_cluster.sh $ceph_node . 0777
copy_file execs/ceph-pool-create.sh $ceph_node . 0777
ssh $ceph_node ./ceph_cluster.... | 326 | 26.25 | 75 | sh |
null | ceph-main/qa/qa_scripts/openstack/connectceph.sh | #!/usr/bin/env bash
#
# Connect openstack node just installed to a ceph cluster.
#
# Essentially implements:
#
# http://docs.ceph.com/en/latest/rbd/rbd-openstack/
#
# The directory named files contains templates for the /etc/glance/glance-api.conf,
# /etc/cinder/cinder.conf, /etc/nova/nova.conf Openstack files
#
set -f... | 2,662 | 58.177778 | 189 | sh |
null | ceph-main/qa/qa_scripts/openstack/copy_func.sh | #
# copy_file(<filename>, <node>, <directory>, [<permissions>], [<owner>]
#
# copy a file -- this is needed because passwordless ssh does not
# work when sudo'ing.
# <file> -- name of local file to be copied
# <node> -- node where we want the file
# <directory> -- location where we want the fil... | 731 | 30.826087 | 71 | sh |
null | ceph-main/qa/qa_scripts/openstack/fix_conf_file.sh | source ./copy_func.sh
#
# Take a templated file, modify a local copy, and write it to the
# remote site.
#
# Usage: fix_conf_file <remote-site> <file-name> <remote-location> [<rbd-secret>]
# <remote-site> -- site where we want this modified file stored.
# <file-name> -- name of the remote file.
# <remote... | 999 | 33.482759 | 81 | sh |
null | ceph-main/qa/qa_scripts/openstack/image_create.sh | #!/usr/bin/env bash
#
# Set up a vm on packstack. Use the iso in RHEL_ISO (defaults to home dir)
#
set -fv
source ./copy_func.sh
source ./fix_conf_file.sh
openstack_node=${1}
ceph_node=${2}
RHEL_ISO=${RHEL_ISO:-~/rhel-server-7.2-x86_64-boot.iso}
copy_file ${RHEL_ISO} $openstack_node .
copy_file execs/run_openstack.sh... | 491 | 27.941176 | 100 | sh |
null | ceph-main/qa/qa_scripts/openstack/openstack.sh | #!/usr/bin/env bash
#
# Install Openstack.
# Usage: openstack <openstack-site> <ceph-monitor>
#
# This script installs Openstack on one node, and connects it to a ceph
# cluster on another set of nodes. It is intended to run from a third
# node.
#
# Assumes a single node Openstack cluster and a single monitor ceph... | 763 | 25.344828 | 71 | sh |
null | ceph-main/qa/qa_scripts/openstack/packstack.sh | #!/usr/bin/env bash
#
# Install openstack by running packstack.
#
# Implements the operations in:
# https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb
#
# The directory named files contains a template for the kilo.conf file used by packstack.
#
set -fv
source ./copy_func.sh... | 604 | 27.809524 | 98 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh | #! /usr/bin/env bash
if [ $# -ne 5 ]; then
echo 'Usage: ceph_install.sh <admin-node> <mon-node> <osd-node> <osd-node> <osd-node>'
exit -1
fi
allnodes=$*
adminnode=$1
shift
cephnodes=$*
monnode=$1
shift
osdnodes=$*
./multi_action.sh cdn_setup.sh $allnodes
./talknice.sh $allnodes
for mac in $allnodes; do
ssh ... | 1,184 | 28.625 | 90 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh | ../copy_func.sh | 15 | 15 | 15 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh | #! /usr/bin/env bash
source copy_func.sh
allparms=$*
cmdv=$1
shift
sites=$*
for mac in $sites; do
echo $cmdv $mac
if [ -f ~/secrets ]; then
copy_file ~/secrets $mac . 0777 ubuntu:ubuntu
fi
copy_file execs/${cmdv} $mac . 0777 ubuntu:ubuntu
ssh $mac ./${cmdv} &
done
./staller.sh $allparms
for ... | 388 | 18.45 | 53 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh | #! /usr/bin/env bash
SPECIFIC_VERSION=latest-Ceph-2-RHEL-7
#SPECIFIC_VERSION=Ceph-2-RHEL-7-20160630.t.0
#SPECIFIC_VERSION=Ceph-2.0-RHEL-7-20160718.t.0
export CEPH_REPO_TOOLS=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/Tools/x86_64/os/
export CEPH_REPO_... | 760 | 83.555556 | 162 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh | #! /usr/bin/env bash
cmd_wait=$1
shift
sites=$*
donebit=0
while [ $donebit -ne 1 ]; do
sleep 10
donebit=1
for rem in $sites; do
rval=`ssh $rem ps aux | grep $cmd_wait | wc -l`
if [ $rval -gt 0 ]; then
donebit=0
fi
done
done
| 277 | 16.375 | 56 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh | #!/usr/bin/env bash
declare -A rsapub
for fulln in $*; do
sname=`echo $fulln | sed 's/\..*//'`
nhead=`echo $sname | sed 's/[0-9]*//g'`
x=`ssh $fulln "ls .ssh/id_rsa"`
if [ -z $x ]; then
ssh $fulln "ssh-keygen -N '' -f .ssh/id_rsa";
fi
xx=`ssh $fulln "ls .ssh/config"`
if [ -z $xx ]; t... | 884 | 28.5 | 86 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh | #! /usr/bin/env bash
if [ -f ~/secrets ]; then
source ~/secrets
fi
subm=`which subscription-manager`
if [ ${#subm} -eq 0 ]; then
sudo yum -y update
exit
fi
subst=`sudo subscription-manager status | grep "^Overall" | awk '{print $NF}'`
if [ $subst == 'Unknown' ]; then
mynameis=${subscrname:-'inigomontoya... | 708 | 32.761905 | 88 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh | #! /usr/bin/env bash
cephnodes=$*
monnode=$1
sudo yum -y install ceph-ansible
cd
sudo ./edit_ansible_hosts.sh $cephnodes
mkdir ceph-ansible-keys
cd /usr/share/ceph-ansible/group_vars/
if [ -f ~/ip_info ]; then
source ~/ip_info
fi
mon_intf=${mon_intf:-'eno1'}
pub_netw=${pub_netw:-'10.8.128.0\/21'}
sudo cp all.sample... | 1,464 | 38.594595 | 100 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh | #! /usr/bin/env bash
ed /etc/ansible/hosts << EOF
$
a
[mons]
${1}
[osds]
${2}
${3}
${4}
.
w
q
EOF
| 101 | 4.666667 | 28 | sh |
null | ceph-main/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh | #! /usr/bin/env bash
ed /usr/share/ceph-ansible/group_vars/osds << EOF
$
/^devices:
.+1
i
- /dev/sdb
- /dev/sdc
- /dev/sdd
.
w
q
EOF
| 142 | 9.214286 | 49 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/ceph-pool-create.sh | #!/usr/bin/env bash
set -f
#
# On the ceph site, make the pools required for Openstack
#
#
# Make a pool, if it does not already exist.
#
function make_pool {
if [[ -z `sudo ceph osd lspools | grep " $1,"` ]]; then
echo "making $1"
sudo ceph osd pool create $1 128
fi
}
#
# Make sure the pg_nu... | 699 | 19 | 62 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/ceph_cluster.sh | #!/usr/bin/env bash
set -f
echo $OS_CEPH_ISO
if [[ $# -ne 4 ]]; then
echo "Usage: ceph_cluster mon.0 osd.0 osd.1 osd.2"
exit -1
fi
allsites=$*
mon=$1
shift
osds=$*
ISOVAL=${OS_CEPH_ISO-rhceph-1.3.1-rhel-7-x86_64-dvd.iso}
sudo mount -o loop ${ISOVAL} /mnt
fqdn=`hostname -f`
lsetup=`ls /mnt/Installer | grep "^i... | 1,092 | 20.431373 | 57 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/libvirt-secret.sh | #!/usr/bin/env bash
set -f
#
# Generate a libvirt secret on the Openstack node.
#
openstack_node=${1}
uuid=`uuidgen`
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
<uuid>${uuid}</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
sudo virsh secret-define --file s... | 422 | 20.15 | 78 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/openstack-preinstall.sh | #!/usr/bin/env bash
set -f
#
# Remotely setup the stuff needed to run packstack. This should do items 1-4 in
# https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb
#
yum remove -y rhos-release
rpm -ivh http://rhos-release.virt.bos.redhat.com/repos/rhos-release/rhos-release-... | 554 | 29.833333 | 98 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/run_openstack.sh | #!/usr/bin/env bash
set -fv
#
# Create a glance image, a corresponding cinder volume, a nova instance, attach, the cinder volume to the
# nova instance, and create a backup.
#
image_name=${1}X
file_name=${2-rhel-server-7.2-x86_64-boot.iso}
source ./keystonerc_admin
glance image-create --name $image_name --disk-format... | 986 | 40.125 | 105 | sh |
null | ceph-main/qa/qa_scripts/openstack/execs/start_openstack.sh | #!/usr/bin/env bash
set -fv
#
# start the Openstack services
#
sudo cp /root/keystonerc_admin ./keystonerc_admin
sudo chmod 0644 ./keystonerc_admin
source ./keystonerc_admin
sudo service httpd stop
sudo service openstack-keystone restart
sudo service openstack-glance-api restart
sudo service openstack-nova-compute res... | 415 | 25 | 49 | sh |
null | ceph-main/qa/rbd/common.sh | #!/usr/bin/env bash
die() {
echo "$*"
exit 1
}
cleanup() {
rm -rf $TDIR
TDIR=""
}
set_variables() {
# defaults
[ -z "$bindir" ] && bindir=$PWD # location of init-ceph
if [ -z "$conf" ]; then
conf="$basedir/ceph.conf"
[ -e $conf ] || conf="/etc/ceph/ceph.conf"
fi
[ ... | 2,161 | 19.788462 | 76 | sh |
null | ceph-main/qa/rbd/rbd.sh | #!/usr/bin/env bash
set -x
basedir=`echo $0 | sed 's/[^/]*$//g'`.
. $basedir/common.sh
rbd_test_init
create_multiple() {
for i in `seq 1 10`; do
rbd_create_image $i
done
for i in `seq 1 10`; do
rbd_add $i
done
for i in `seq 1 10`; do
devname=/dev/rbd`eval echo \\$rbd$i`
echo $devname
done
for i in `... | 676 | 12.27451 | 38 | sh |
null | ceph-main/qa/releases/infernalis.yaml | tasks:
- exec:
osd.0:
- ceph osd set sortbitwise
- for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
| 148 | 23.833333 | 88 | yaml |
null | ceph-main/qa/releases/jewel.yaml | tasks:
- exec:
osd.0:
- ceph osd set sortbitwise
- ceph osd set require_jewel_osds
- for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
| 188 | 26 | 88 | yaml |
null | ceph-main/qa/releases/kraken.yaml | tasks:
- exec:
osd.0:
- ceph osd set require_kraken_osds
| 67 | 12.6 | 40 | yaml |
null | ceph-main/qa/releases/luminous-with-mgr.yaml | tasks:
- exec:
osd.0:
- ceph osd require-osd-release luminous
- ceph osd set-require-min-compat-client luminous
- ceph.healthy:
overrides:
ceph:
conf:
mon:
mon warn on osd down out interval zero: false
| 238 | 18.916667 | 55 | yaml |
null | ceph-main/qa/releases/luminous.yaml | tasks:
- exec:
mgr.x:
- mkdir -p /var/lib/ceph/mgr/ceph-x
- ceph auth get-or-create-key mgr.x mon 'allow profile mgr'
- ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring
- ceph.restart:
daemons: [mgr.x]
wait-for-healthy: false
- exec:
osd.0:
- ceph osd require-osd-release... | 538 | 23.5 | 65 | yaml |
null | ceph-main/qa/releases/mimic.yaml | tasks:
- exec:
osd.0:
- ceph osd require-osd-release mimic
- ceph osd set-require-min-compat-client mimic
- ceph.healthy:
| 138 | 18.857143 | 52 | yaml |
null | ceph-main/qa/releases/nautilus.yaml | tasks:
- exec:
osd.0:
- ceph osd require-osd-release nautilus
- ceph osd set-require-min-compat-client nautilus
- for p in `ceph osd pool ls`; do ceph osd pool set $p pg_autoscale_mode off; done
- ceph.healthy:
| 233 | 28.25 | 88 | yaml |
null | ceph-main/qa/releases/octopus.yaml | tasks:
- exec:
osd.0:
- ceph osd require-osd-release octopus
- ceph osd set-require-min-compat-client octopus
- for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
- ceph.healthy:
| 233 | 28.25 | 90 | yaml |
null | ceph-main/qa/releases/pacific-from-o.yaml | tasks:
- exec:
osd.0:
- ceph osd require-osd-release pacific
- ceph osd set-require-min-compat-client pacific
- ceph.healthy:
| 142 | 19.428571 | 54 | yaml |