| repo (string, 2–152 chars, nullable) | file (string, 15–239 chars) | code (string, 0–58.4M chars) | file_length (int64, 0–58.4M) | avg_line_length (float64, 0–1.81M) | max_line_length (int64, 0–12.7M) | extension_type (string, 364 classes) |
|---|---|---|---|---|---|---|
null | ceph-main/qa/workunits/fs/snaps/snaptest-capwb.sh | #!/bin/sh -x
set -e
mkdir foo
# make sure mds handles it when the client does not send flushsnap
echo x > foo/x
sync
mkdir foo/.snap/ss
ln foo/x foo/xx
cat foo/.snap/ss/x
rmdir foo/.snap/ss
#
echo a > foo/a
echo b > foo/b
mkdir foo/.snap/s
r=`cat foo/.snap/s/a`
[ -z "$r" ] && echo "a appears empty in snapshot" && f... | 492 | 13.5 | 66 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-dir-rename.sh | #!/bin/sh -x
set -e
#
# make sure we keep an existing dn's seq
#
mkdir a
mkdir .snap/bar
mkdir a/.snap/foo
rmdir a/.snap/foo
rmdir a
stat .snap/bar/a
rmdir .snap/bar
echo OK
| 178 | 8.944444 | 40 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-double-null.sh | #!/bin/sh -x
set -e
# multiple intervening snapshots with no modifications, and thus no
# snapflush client_caps messages. make sure the mds can handle this.
for f in `seq 1 20` ; do
mkdir a
cat > a/foo &
mkdir a/.snap/one
mkdir a/.snap/two
chmod 777 a/foo
sync # this might crash the mds
ps
rmdir a/.snap/*
rm a/f... | 346 | 13.458333 | 69 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-estale.sh | #!/bin/sh -x
mkdir .snap/foo
echo "We want ENOENT, not ESTALE, here."
for f in `seq 1 100`
do
stat .snap/foo/$f 2>&1 | grep 'No such file'
done
rmdir .snap/foo
echo "OK"
| 178 | 11.785714 | 48 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-git-ceph.sh | #!/bin/sh -x
set -e
# try it again if the clone is slow and the second time
retried=false
trap -- 'retry' EXIT
retry() {
rm -rf ceph
# double the timeout value
timeout 3600 git clone https://git.ceph.com/ceph.git
}
rm -rf ceph
timeout 1800 git clone https://git.ceph.com/ceph.git
trap - EXIT
cd ceph
versi... | 790 | 13.924528 | 56 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-hardlink.sh | #!/bin/sh -x
set -e
mkdir 1 2
echo asdf >1/file1
echo asdf >1/file2
ln 1/file1 2/file1
ln 1/file2 2/file2
mkdir 2/.snap/s1
echo qwer >1/file1
grep asdf 2/.snap/s1/file1
rm -f 1/file2
grep asdf 2/.snap/s1/file2
rm -f 2/file2
grep asdf 2/.snap/s1/file2
rmdir 2/.snap/s1
rm -rf 1 2
echo OK
| 295 | 10.384615 | 26 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-intodir.sh | #!/bin/sh -ex
# this tests fix for #1399
mkdir foo
mkdir foo/.snap/one
touch bar
mv bar foo
sync
# should not crash :)
mkdir baz
mkdir baz/.snap/two
mv baz foo
sync
# should not crash :)
# clean up.
rmdir foo/baz/.snap/two
rmdir foo/.snap/one
rm -r foo
echo OK
| 265 | 10.565217 | 26 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh | #!/bin/sh -x
set -e
echo asdf > a
mkdir .snap/1
chmod 777 a
mkdir .snap/2
echo qwer > a
mkdir .snap/3
chmod 666 a
mkdir .snap/4
echo zxcv > a
mkdir .snap/5
ls -al .snap/?/a
grep asdf .snap/1/a
stat .snap/1/a | grep 'Size: 5'
grep asdf .snap/2/a
stat .snap/2/a | grep 'Size: 5'
stat .snap/2/a | grep -- '-rwxrwxrwx'
... | 628 | 13.627907 | 37 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-name-limits.sh | #!/bin/bash
#
# This tests snapshot names limits: names have to be < 240 chars
#
function cleanup ()
{
rmdir d1/.snap/*
rm -rf d1
}
function fail ()
{
echo $@
cleanup
exit 1
}
mkdir d1
longname=$(printf "%.241d" 2)
mkdir d1/.snap/$longname 2> /dev/null
[ -d d1/.snap/$longname ] && fail "Invalid snapshot exists... | 351 | 11.571429 | 70 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-parents.sh | #!/bin/sh
set -ex
echo "making directory tree and files"
mkdir -p 1/a/b/c/
echo "i'm file1" > 1/a/file1
echo "i'm file2" > 1/a/b/file2
echo "i'm file3" > 1/a/b/c/file3
echo "snapshotting"
mkdir 1/.snap/foosnap1
mkdir 2
echo "moving tree"
mv 1/a 2
echo "checking snapshot contains tree..."
dir1=`find 1/.snap/foosnap1 |... | 1,043 | 25.1 | 49 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-realm-split.sh | #!/bin/sh -x
set -e
mkdir -p 1/a
exec 3<> 1/a/file1
echo -n a >&3
mkdir 1/.snap/s1
echo -n b >&3
mkdir 2
# create new snaprealm at dir a, file1's cap should be attached to the new snaprealm
mv 1/a 2
mkdir 2/.snap/s2
echo -n c >&3
exec 3>&-
grep '^a$' 1/.snap/s1/a/file1
grep '^ab$' 2/.snap/s2/a/file1
grep '^ab... | 388 | 11.15625 | 84 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-snap-rename.sh | #!/bin/sh -x
expect_failure() {
if "$@"; then return 1; else return 0; fi
}
set -e
mkdir -p d1/d2
mkdir -p d1/d3
mkdir d1/.snap/foo
mkdir d1/d2/.snap/foo
mkdir d1/d3/.snap/foo
mkdir d1/d3/.snap/bar
mv d1/d2/.snap/foo d1/d2/.snap/bar
# snapshot name can't start with _
expect_failure mv d1/d2/.snap/bar d1/d2/.snap/_ba... | 893 | 25.294118 | 86 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh | #!/bin/sh -x
set -e
file=linux-2.6.33.tar.bz2
wget -q http://download.ceph.com/qa/$file
real=`md5sum $file | awk '{print $1}'`
for f in `seq 1 20`
do
echo $f
cp $file a
mkdir .snap/s
rm a
cp .snap/s/a /tmp/a
cur=`md5sum /tmp/a | awk '{print $1}'`
if [ "$cur" != "$real" ]; then
echo "FAI... | 407 | 15.32 | 50 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-upchildrealms.sh | #!/bin/sh -x
set -e
#
# verify that a snap update on a parent realm will induce
# snap cap writeback for inodes child realms
#
mkdir a
mkdir a/b
mkdir a/.snap/a1
mkdir a/b/.snap/b1
echo asdf > a/b/foo
mkdir a/.snap/a2
# client _should_ have just queued a capsnap for writeback
ln a/b/foo a/b/bar # make the serv... | 496 | 16.137931 | 58 | sh |
null | ceph-main/qa/workunits/fs/snaps/snaptest-xattrwb.sh | #!/bin/sh -x
set -e
echo "testing simple xattr wb"
touch x
setfattr -n user.foo x
mkdir .snap/s1
getfattr -n user.foo .snap/s1/x | grep user.foo
rm x
rmdir .snap/s1
echo "testing wb with pre-wb server cow"
mkdir a
mkdir a/b
mkdir a/b/c
# b now has As but not Ax
setfattr -n user.foo a/b
mkdir a/.snap/s
mkdir a/b/cc
#... | 569 | 18 | 73 | sh |
null | ceph-main/qa/workunits/fs/snaps/untar_snap_rm.sh | #!/bin/sh
set -ex
do_tarball() {
wget http://download.ceph.com/qa/$1
tar xvf$2 $1
mkdir .snap/k
sync
rm -rv $3
cp -av .snap/k .
rmdir .snap/k
rm -rv k
rm $1
}
do_tarball coreutils_8.5.orig.tar.gz z coreutils-8.5
do_tarball linux-2.6.33.tar.bz2 j linux-2.6.33
| 298 | 14.736842 | 52 | sh |
null | ceph-main/qa/workunits/hadoop/repl.sh | #!/usr/bin/env bash
set -e
set -x
# bail if $TESTDIR is not set as this test will fail in that scenario
[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
# if HADOOP_PREFIX is not set, use default
[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; }
# create pools with di... | 1,154 | 25.860465 | 86 | sh |
null | ceph-main/qa/workunits/hadoop/terasort.sh | #!/usr/bin/env bash
set -e
set -x
INPUT=/terasort-input
OUTPUT=/terasort-output
REPORT=/tersort-report
num_records=100000
[ ! -z $NUM_RECORDS ] && num_records=$NUM_RECORDS
# bail if $TESTDIR is not set as this test will fail in that scenario
[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting.... | 2,189 | 27.441558 | 86 | sh |
null | ceph-main/qa/workunits/hadoop/wordcount.sh | #!/usr/bin/env bash
set -ex
WC_INPUT=/wc_input
WC_OUTPUT=/wc_output
DATA_INPUT=$(mktemp -d)
echo "starting hadoop-wordcount test"
# bail if $TESTDIR is not set as this test will fail in that scenario
[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
# if HADOOP_PREFIX is not se... | 968 | 26.685714 | 86 | sh |
null | ceph-main/qa/workunits/libcephfs/test.sh | #!/bin/sh -e
ceph_test_libcephfs
ceph_test_libcephfs_access
ceph_test_libcephfs_reclaim
ceph_test_libcephfs_lazyio
ceph_test_libcephfs_newops
ceph_test_libcephfs_suidsgid
exit 0
| 180 | 15.454545 | 28 | sh |
null | ceph-main/qa/workunits/mgr/test_localpool.sh | #!/bin/sh -ex
ceph config set mgr mgr/localpool/subtree host
ceph config set mgr mgr/localpool/failure_domain osd
ceph mgr module enable localpool
while ! ceph osd pool ls | grep '^by-host-'
do
sleep 5
done
ceph mgr module disable localpool
for p in `ceph osd pool ls | grep '^by-host-'`
do
ceph osd pool rm $... | 459 | 19.909091 | 56 | sh |
null | ceph-main/qa/workunits/mgr/test_per_module_finisher.sh | #!/usr/bin/env bash
set -ex
# This testcase tests the per module finisher stats for enabled modules
# using check counter (qa/tasks/check_counter.py).
# 'balancer' commands
ceph balancer pool ls
# 'crash' commands
ceph crash ls
ceph crash ls-new
# 'device' commands
ceph device query-daemon-health-metrics mon.a
# '... | 624 | 15.025641 | 71 | sh |
null | ceph-main/qa/workunits/mon/auth_caps.sh | #!/usr/bin/env bash
set -e
set -x
declare -A keymap
combinations="r w x rw rx wx rwx"
for i in ${combinations}; do
k="foo_$i"
k=`ceph auth get-or-create-key client.$i mon "allow $i"` || exit 1
keymap["$i"]=$k
done
# add special caps
keymap["all"]=`ceph auth get-or-create-key client.all mon 'allow *'` || exit ... | 2,610 | 18.931298 | 78 | sh |
null | ceph-main/qa/workunits/mon/auth_key_rotation.sh | #!/usr/bin/bash -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
ceph auth export
ceph auth rm client.rot
ceph auth get-or-create client.rot mon 'allow rwx'
ceph auth export client.rot | grep key
ceph auth export client.rot | expect_false grep pending.key
ceph auth get-or-create-p... | 1,642 | 26.847458 | 79 | sh |
null | ceph-main/qa/workunits/mon/caps.py | #!/usr/bin/python3
from __future__ import print_function
import subprocess
import shlex
import errno
import sys
import os
import io
import re
from ceph_argparse import * # noqa
keyring_base = '/tmp/cephtest-caps.keyring'
class UnexpectedReturn(Exception):
def __init__(self, cmd, ret, expected, msg):
if isins... | 9,820 | 26.280556 | 93 | py |
null | ceph-main/qa/workunits/mon/caps.sh | #!/usr/bin/env bash
set -x
tmp=/tmp/cephtest-mon-caps-madness
exit_on_error=1
[[ ! -z $TEST_EXIT_ON_ERROR ]] && exit_on_error=$TEST_EXIT_ON_ERROR
if [ `uname` = FreeBSD ]; then
ETIMEDOUT=60
else
ETIMEDOUT=110
fi
expect()
{
cmd=$1
expected_ret=$2
echo $cmd
eval $cmd >&/dev/null
ret=$?
if [[ $... | 2,743 | 29.153846 | 81 | sh |
null | ceph-main/qa/workunits/mon/config.sh | #!/bin/bash -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
ceph config dump
# value validation
ceph config set mon.a debug_asok 22
ceph config set mon.a debug_asok 22/33
ceph config get mon.a debug_asok | grep 22
ceph config set mon.a debug_asok 1/2
expect_false ceph config set mo... | 4,097 | 28.912409 | 69 | sh |
null | ceph-main/qa/workunits/mon/crush_ops.sh | #!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
ceph osd crush dump
# rules
ceph osd crush rule dump
ceph osd crush rule ls
ceph osd crush rule list
ceph osd crush rule create-simple foo default host
ceph osd crush rule create-simple foo default host
ceph ... | 8,549 | 34.92437 | 82 | sh |
null | ceph-main/qa/workunits/mon/osd.sh | #!/bin/sh -x
set -e
ua=`uuidgen`
ub=`uuidgen`
# should get same id with same uuid
na=`ceph osd create $ua`
test $na -eq `ceph osd create $ua`
nb=`ceph osd create $ub`
test $nb -eq `ceph osd create $ub`
test $nb -ne $na
ceph osd rm $na
ceph osd rm $na
ceph osd rm $nb
ceph osd rm 1000
na2=`ceph osd create $ua`
ech... | 326 | 12.08 | 35 | sh |
null | ceph-main/qa/workunits/mon/pg_autoscaler.sh | #!/bin/bash -ex
NUM_OSDS=$(ceph osd ls | wc -l)
if [ $NUM_OSDS -lt 6 ]; then
echo "test requires at least 6 OSDs"
exit 1
fi
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ $NUM_POOLS -gt 0 ]; then
echo "test requires no preexisting pools"
exit 1
fi
function wait_for() {
local sec=$1
local cmd=$2
... | 5,383 | 33.292994 | 97 | sh |
null | ceph-main/qa/workunits/mon/ping.py | #!/usr/bin/python3
import json
import shlex
import subprocess
class UnexpectedReturn(Exception):
def __init__(self, cmd, ret, expected, msg):
if isinstance(cmd, list):
self.cmd = ' '.join(cmd)
else:
assert isinstance(cmd, str), \
'cmd needs to be either a l... | 3,047 | 27.485981 | 81 | py |
null | ceph-main/qa/workunits/mon/pool_ops.sh | #!/usr/bin/env bash
set -ex
function expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
function get_config_value_or_die()
{
local pool_name config_opt raw val
pool_name=$1
config_opt=$2
raw="`$SUDO ceph osd pool get $pool_name $config_opt 2>/dev/null`"
if [[ $? -ne 0 ]]; then
echo... | 3,062 | 28.171429 | 75 | sh |
null | ceph-main/qa/workunits/mon/rbd_snaps_ops.sh | #!/usr/bin/env bash
# attempt to trigger #6047
cmd_no=0
expect()
{
cmd_no=$(($cmd_no+1))
cmd="$1"
expected=$2
echo "[$cmd_no] $cmd"
eval $cmd
ret=$?
if [[ $ret -ne $expected ]]; then
echo "[$cmd_no] unexpected return '$ret', expected '$expected'"
exit 1
fi
}
ceph osd pool delete test test -... | 2,074 | 32.467742 | 79 | sh |
null | ceph-main/qa/workunits/mon/test_config_key_caps.sh | #!/usr/bin/env bash
set -x
set -e
tmp=$(mktemp -d -p /tmp test_mon_config_key_caps.XXXXX)
entities=()
function cleanup()
{
set +e
set +x
if [[ -e $tmp/keyring ]] && [[ -e $tmp/keyring.orig ]]; then
grep '\[.*\..*\]' $tmp/keyring.orig > $tmp/entities.orig
for e in $(grep '\[.*\..*\]' $tmp/keyring | \
diff $... | 7,172 | 34.509901 | 79 | sh |
null | ceph-main/qa/workunits/mon/test_mon_config_key.py | #!/usr/bin/python3
#
# test_mon_config_key - Test 'ceph config-key' interface
#
# Copyright (C) 2013 Inktank
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1, as published by the Free Software
# Foundation. See file COPYING.
#
... | 15,314 | 32.006466 | 96 | py |
null | ceph-main/qa/workunits/mon/test_mon_osdmap_prune.sh | #!/bin/bash
. $(dirname $0)/../../standalone/ceph-helpers.sh
set -x
function wait_for_osdmap_manifest() {
local what=${1:-"true"}
local -a delays=($(get_timeout_delays $TIMEOUT .1))
local -i loop=0
for ((i=0; i < ${#delays[*]}; ++i)); do
has_manifest=$(ceph report | jq 'has("osdmap_manifest")')
if... | 5,230 | 24.393204 | 71 | sh |
null | ceph-main/qa/workunits/mon/test_noautoscale_flag.sh | #!/bin/bash -ex
unset CEPH_CLI_TEST_DUP_COMMAND
NUM_POOLS=$(ceph osd pool ls | wc -l)
if [ "$NUM_POOLS" -gt 0 ]; then
echo "test requires no preexisting pools"
exit 1
fi
ceph osd pool set noautoscale
ceph osd pool create pool_a
echo 'pool_a autoscale_mode:' $(ceph osd pool autoscale-status | grep pool_a |... | 1,959 | 22.333333 | 102 | sh |
null | ceph-main/qa/workunits/objectstore/test_fuse.sh | #!/bin/sh -ex
if ! id -u | grep -q '^0$'; then
echo "not root, re-running self via sudo"
sudo PATH=$PATH TYPE=$TYPE $0
exit 0
fi
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
COT=ceph-objectstore-tool
DATA=store_test_fuse_dir
[ -z "$TYPE" ] && TYPE=bluestore
MNT=stor... | 4,352 | 32.484615 | 87 | sh |
null | ceph-main/qa/workunits/osdc/stress_objectcacher.sh | #!/bin/sh -ex
for i in $(seq 1 10)
do
for DELAY in 0 1000
do
for OPS in 1000 10000
do
for OBJECTS in 10 50 100
do
for READS in 0.90 0.50 0.10
do
for OP_SIZE in 4096 131072 1048576
do
... | 793 | 26.37931 | 214 | sh |
null | ceph-main/qa/workunits/rados/clone.sh | #!/bin/sh -x
set -e
rados -p data rm foo || true
rados -p data put foo.tmp /etc/passwd --object-locator foo
rados -p data clonedata foo.tmp foo --object-locator foo
rados -p data get foo /tmp/foo
cmp /tmp/foo /etc/passwd
rados -p data rm foo.tmp --object-locator foo
rados -p data rm foo
echo OK | 298 | 22 | 58 | sh |
null | ceph-main/qa/workunits/rados/load-gen-big.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 10240 \
--min-object-size 1048576 \
--max-object-size 25600000 \
--max-ops 1024 \
--max-backlog 1024 \
--read-percent 50 \
--run-length 1200
| 218 | 18.909091 | 32 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mix-small-long.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 1024 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 50 \
--run-length 1800
| 208 | 18 | 31 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mix-small.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 1024 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 50 \
--run-length 600
| 207 | 17.909091 | 31 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mix.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 10240 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 50 \
--run-length 600
| 208 | 18 | 31 | sh |
null | ceph-main/qa/workunits/rados/load-gen-mostlyread.sh | #!/bin/sh
rados -p rbd load-gen \
--num-objects 51200 \
--min-object-size 1 \
--max-object-size 1048576 \
--max-ops 128 \
--max-backlog 128 \
--read-percent 90 \
--run-length 600
| 208 | 18 | 31 | sh |
null | ceph-main/qa/workunits/rados/stress_watch.sh | #!/bin/sh -e
ceph_test_stress_watch
ceph_multi_stress_watch rep reppool repobj
ceph_multi_stress_watch ec ecpool ecobj
exit 0
| 128 | 15.125 | 42 | sh |
null | ceph-main/qa/workunits/rados/test.sh | #!/usr/bin/env bash
set -ex
parallel=1
[ "$1" = "--serial" ] && parallel=0
color=""
[ -t 1 ] && color="--gtest_color=yes"
function cleanup() {
pkill -P $$ || true
}
trap cleanup EXIT ERR HUP INT QUIT
declare -A pids
for f in \
api_aio api_aio_pp \
api_io api_io_pp \
api_asio api_list \
api_lock... | 1,175 | 17.666667 | 107 | sh |
null | ceph-main/qa/workunits/rados/test_alloc_hint.sh | #!/usr/bin/env bash
set -ex
shopt -s nullglob # fns glob expansion in expect_alloc_hint_eq()
#
# Helpers
#
function get_xml_val() {
local xml="$1"
local tag="$2"
local regex=".*<${tag}>(.*)</${tag}>.*"
if [[ ! "${xml}" =~ ${regex} ]]; then
echo "'${xml}' xml doesn't match '${tag}' tag regex"... | 4,881 | 26.426966 | 88 | sh |
null | ceph-main/qa/workunits/rados/test_cache_pool.sh | #!/usr/bin/env bash
set -ex
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
# create pools, set up tier relationship
ceph osd pool create base_pool 2
ceph osd pool application enable base_pool rados
ceph osd pool create partial_wrong 2
ceph osd pool create wrong_cache 2
ceph osd tier add base_p... | 5,597 | 31.736842 | 78 | sh |
null | ceph-main/qa/workunits/rados/test_crash.sh | #!/bin/sh
set -x
# run on a single-node three-OSD cluster
sudo killall -ABRT ceph-osd
sleep 5
# kill caused coredumps; find them and delete them, carefully, so as
# not to disturb other coredumps, or else teuthology will see them
# and assume test failure. sudos are because the core files are
# root/600
for f in $... | 1,406 | 30.266667 | 77 | sh |
null | ceph-main/qa/workunits/rados/test_crushdiff.sh | #!/usr/bin/env bash
set -ex
REP_POOL=
EC_POOL=
TEMPDIR=
OSD_NUM=$(ceph osd ls | wc -l)
test ${OSD_NUM} -gt 0
setup() {
local pool
TEMPDIR=`mktemp -d`
pool=test-crushdiff-rep-$$
ceph osd pool create ${pool} 32
REP_POOL=${pool}
rados -p ${REP_POOL} bench 5 write --no-cleanup
if [ ${OSD_... | 3,064 | 28.471154 | 77 | sh |
null | ceph-main/qa/workunits/rados/test_dedup_tool.sh | #!/usr/bin/env bash
set -x
die() {
echo "$@"
exit 1
}
do_run() {
if [ "$1" == "--tee" ]; then
shift
tee_out="$1"
shift
"$@" | tee $tee_out
else
"$@"
fi
}
run_expect_succ() {
echo "RUN_EXPECT_SUCC: " "$@"
do_run "$@"
[ $? -ne 0 ] && die "expected success,... | 16,953 | 35.936819 | 197 | sh |
null | ceph-main/qa/workunits/rados/test_envlibrados_for_rocksdb.sh | #!/usr/bin/env bash
set -ex
############################################
# Helper functions
############################################
source $(dirname $0)/../ceph-helpers-root.sh
############################################
# Install required tools
############################################
echo "Install req... | 2,681 | 26.367347 | 170 | sh |
null | ceph-main/qa/workunits/rados/test_hang.sh | #!/bin/sh -ex
# Hang forever for manual testing using the thrasher
while(true)
do
sleep 300
done
exit 0
| 108 | 11.111111 | 52 | sh |
null | ceph-main/qa/workunits/rados/test_health_warnings.sh | #!/usr/bin/env bash
set -uex
# number of osds = 10
crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
ceph osd setcrushmap -i crushmap
ceph osd tree
ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1
ceph osd set noout
wait_for_healthy() {... | 1,899 | 23.675325 | 94 | sh |
null | ceph-main/qa/workunits/rados/test_large_omap_detection.py | #!/usr/bin/python3
# -*- mode:python -*-
# vim: ts=4 sw=4 smarttab expandtab
#
# Copyright (C) 2017 Red Hat <contact@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version ... | 4,467 | 32.096296 | 84 | py |
null | ceph-main/qa/workunits/rados/test_libcephsqlite.sh | #!/bin/bash -ex
# The main point of these tests beyond ceph_test_libcephsqlite is to:
#
# - Ensure you can load the Ceph VFS via the dynamic load extension mechanism
# in SQLite.
# - Check the behavior of a dead application, that it does not hold locks
# indefinitely.
pool="$1"
ns="$(basename $0)"
function sqlit... | 2,769 | 19.218978 | 180 | sh |
null | ceph-main/qa/workunits/rados/test_librados_build.sh | #!/bin/bash -ex
#
# Compile and run a librados application outside of the ceph build system, so
# that we can be sure librados.h[pp] is still usable and hasn't accidentally
# started depending on internal headers.
#
# The script assumes all dependencies - e.g. curl, make, gcc, librados headers,
# libradosstriper header... | 2,134 | 23.261364 | 90 | sh |
null | ceph-main/qa/workunits/rados/test_pool_access.sh | #!/usr/bin/env bash
set -ex
KEYRING=$(mktemp)
trap cleanup EXIT ERR HUP INT QUIT
cleanup() {
(ceph auth del client.mon_read || true) >/dev/null 2>&1
(ceph auth del client.mon_write || true) >/dev/null 2>&1
rm -f $KEYRING
}
expect_false()
{
set -x
if "$@"; then return 1; else return 0; fi
}
create_po... | 2,235 | 19.513761 | 93 | sh |
null | ceph-main/qa/workunits/rados/test_pool_quota.sh | #!/bin/sh -ex
p=`uuidgen`
# objects
ceph osd pool create $p 12
ceph osd pool set-quota $p max_objects 10
ceph osd pool application enable $p rados
for f in `seq 1 10` ; do
rados -p $p put obj$f /etc/passwd
done
sleep 30
rados -p $p put onemore /etc/passwd &
pid=$!
ceph osd pool set-quota $p max_objects 100
wait... | 1,210 | 16.550725 | 58 | sh |
null | ceph-main/qa/workunits/rados/test_python.sh | #!/bin/sh -ex
ceph osd pool create rbd
${PYTHON:-python3} -m nose -v $(dirname $0)/../../../src/test/pybind/test_rados.py "$@"
exit 0
| 135 | 21.666667 | 87 | sh |
null | ceph-main/qa/workunits/rados/test_rados_timeouts.sh | #!/usr/bin/env bash
set -x
delay_mon() {
MSGTYPE=$1
shift
$@ --rados-mon-op-timeout 1 --ms-inject-delay-type mon --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
if [ $? -eq 0 ]; then
exit 1
fi
}
delay_osd() {
MSGTYPE=$1
shift
$... | 1,338 | 26.326531 | 157 | sh |
null | ceph-main/qa/workunits/rados/test_rados_tool.sh | #!/usr/bin/env bash
set -x
die() {
echo "$@"
exit 1
}
usage() {
cat <<EOF
test_rados_tool.sh: tests rados_tool
-c: RADOS configuration file to use [optional]
-k: keep temp files
-h: this help message
-p: set temporary pool to use [optional]
EOF
}
do_run() {
if [ "$1" == "... | 31,345 | 32.887568 | 196 | sh |
null | ceph-main/qa/workunits/rados/version_number_sanity.sh | #!/bin/bash -ex
#
# test that ceph RPM/DEB package version matches "ceph --version"
# (for a loose definition of "matches")
#
source /etc/os-release
case $ID in
debian|ubuntu)
RPMDEB='DEB'
dpkg-query --show ceph-common
PKG_NAME_AND_VERSION=$(dpkg-query --show ceph-common)
;;
centos|fedora|rhel|opensuse*... | 998 | 31.225806 | 93 | sh |
null | ceph-main/qa/workunits/rbd/cli_generic.sh | #!/usr/bin/env bash
set -ex
. $(dirname $0)/../../standalone/ceph-helpers.sh
export RBD_FORCE_ALLOW_V1=1
# make sure rbd pool is EMPTY.. this is a test script!!
rbd ls | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
IMGS="testimg1 testimg2 tes... | 61,261 | 34.700466 | 197 | sh |
null | ceph-main/qa/workunits/rbd/cli_migration.sh | #!/usr/bin/env bash
set -ex
. $(dirname $0)/../../standalone/ceph-helpers.sh
TEMPDIR=
IMAGE1=image1
IMAGE2=image2
IMAGE3=image3
IMAGES="${IMAGE1} ${IMAGE2} ${IMAGE3}"
cleanup() {
cleanup_tempdir
remove_images
}
setup_tempdir() {
TEMPDIR=`mktemp -d`
}
cleanup_tempdir() {
rm -rf ${TEMPDIR}
}
create_... | 9,606 | 25.835196 | 132 | sh |
null | ceph-main/qa/workunits/rbd/concurrent.sh | #!/usr/bin/env bash
# Copyright (C) 2013 Inktank Storage, Inc.
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU G... | 9,823 | 25.12766 | 71 | sh |
null | ceph-main/qa/workunits/rbd/diff.sh | #!/usr/bin/env bash
set -ex
function cleanup() {
rbd snap purge foo || :
rbd rm foo || :
rbd snap purge foo.copy || :
rbd rm foo.copy || :
rbd snap purge foo.copy2 || :
rbd rm foo.copy2 || :
rm -f foo.diff foo.out
}
cleanup
rbd create foo --size 1000
rbd bench --io-type write foo --io-siz... | 1,373 | 24.444444 | 100 | sh |
null | ceph-main/qa/workunits/rbd/diff_continuous.sh | #!/usr/bin/env bash
set -ex
set -o pipefail
function untar_workload() {
local i
for ((i = 0; i < 10; i++)); do
pv -L 10M linux-5.4.tar.gz > "${MOUNT}/linux-5.4.tar.gz"
tar -C "${MOUNT}" -xzf "${MOUNT}/linux-5.4.tar.gz"
sync "${MOUNT}"
rm -rf "${MOUNT}"/linux-5.4*
done
}
fu... | 3,089 | 27.878505 | 76 | sh |
null | ceph-main/qa/workunits/rbd/huge-tickets.sh | #!/usr/bin/env bash
# This is a test for http://tracker.ceph.com/issues/8979 and the fallout
# from triaging it. #8979 itself was random crashes on corrupted memory
# due to a buffer overflow (for tickets larger than 256 bytes), further
# inspection showed that vmalloced tickets weren't handled correctly as
# well.
#... | 1,342 | 30.97619 | 85 | sh |
null | ceph-main/qa/workunits/rbd/image_read.sh | #!/usr/bin/env bash
# Copyright (C) 2013 Inktank Storage, Inc.
#
# This is free software; see the source for copying conditions.
# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU G... | 17,897 | 25.281938 | 72 | sh |
null | ceph-main/qa/workunits/rbd/import_export.sh | #!/bin/sh -ex
# V1 image unsupported but required for testing purposes
export RBD_FORCE_ALLOW_V1=1
# returns data pool for a given image
get_image_data_pool () {
image=$1
data_pool=$(rbd info $image | grep "data_pool: " | awk -F':' '{ print $NF }')
if [ -z $data_pool ]; then
data_pool='rbd'
fi
... | 8,814 | 32.903846 | 87 | sh |
null | ceph-main/qa/workunits/rbd/issue-20295.sh | #!/bin/sh -ex
TEST_POOL=ecpool
TEST_IMAGE=test1
PGS=12
ceph osd pool create $TEST_POOL $PGS $PGS erasure
ceph osd pool application enable $TEST_POOL rbd
ceph osd pool set $TEST_POOL allow_ec_overwrites true
rbd --data-pool $TEST_POOL create --size 1024G $TEST_IMAGE
rbd bench \
--io-type write \
--io-size 4096... | 396 | 19.894737 | 58 | sh |
null | ceph-main/qa/workunits/rbd/journal.sh | #!/usr/bin/env bash
set -e
. $(dirname $0)/../../standalone/ceph-helpers.sh
function list_tests()
{
echo "AVAILABLE TESTS"
for i in $TESTS; do
echo " $i"
done
}
function usage()
{
echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...] [--no-cleanup]]"
}
function expect_false()
{
set -x
if "$@"... | 8,280 | 24.324159 | 98 | sh |
null | ceph-main/qa/workunits/rbd/kernel.sh | #!/usr/bin/env bash
set -ex
CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
CEPH_ID=${CEPH_ID:-admin}
SECRET_ARGS=''
if [ ! -z $CEPH_SECRET_FILE ]; then
SECRET_ARGS="--secret $CEPH_SECRET_FILE"
fi
TMP_FILES="/tmp/img1 /tmp/img1.small /tmp/img1.snap1 /tmp/img1.export /tmp/img1.trunc"
function expect_false() {
if "$@"; then ... | 2,952 | 28.237624 | 86 | sh |
null | ceph-main/qa/workunits/rbd/krbd_data_pool.sh | #!/usr/bin/env bash
set -ex
export RBD_FORCE_ALLOW_V1=1
function fill_image() {
local spec=$1
local dev
dev=$(sudo rbd map $spec)
xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 -W 0 $IMAGE_SIZE" $dev
sudo rbd unmap $dev
}
function create_clones() {
local spec=$1
rbd snap create $spec@snap
... | 6,189 | 28.903382 | 118 | sh |
null | ceph-main/qa/workunits/rbd/krbd_exclusive_option.sh | #!/usr/bin/env bash
set -ex
function expect_false() {
if "$@"; then return 1; else return 0; fi
}
function assert_locked() {
local dev_id="${1#/dev/rbd}"
local client_addr
client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
local client_id
client_id="$(< $SYSFS_DIR/$dev_id/client_id)"
# c... | 6,079 | 24.982906 | 99 | sh |
null | ceph-main/qa/workunits/rbd/krbd_fallocate.sh | #!/usr/bin/env bash
# - fallocate -z deallocates because BLKDEV_ZERO_NOUNMAP hint is ignored by
# krbd
#
# - big unaligned blkdiscard and fallocate -z/-p leave the objects in place
set -ex
# no blkdiscard(8) in trusty
function py_blkdiscard() {
local offset=$1
python3 <<EOF
import fcntl, struct
BLKDISCARD =... | 4,056 | 25.690789 | 125 | sh |
null | ceph-main/qa/workunits/rbd/krbd_huge_osdmap.sh | #!/usr/bin/env bash
# This is a test for https://tracker.ceph.com/issues/40481.
#
# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
# is ~13M. However in-memory osdmap is larger than ~3M: in-memory osd_addr
# array for 60000 OSDs is ~8M because of sockaddr_storage.
#
# Set mon_max_osd = 60000... | 1,487 | 27.615385 | 76 | sh |
null | ceph-main/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh | #!/bin/bash
set -ex
function run_test() {
ceph osd pool create foo 12
rbd pool init foo
rbd create --size 1 foo/img
local dev
dev=$(sudo rbd map foo/img)
sudo rbd unmap $dev
ceph osd pool delete foo foo --yes-i-really-really-mean-it
}
NUM_ITER=20
for ((i = 0; i < $NUM_ITER; i++)); do
... | 471 | 14.225806 | 62 | sh |
null | ceph-main/qa/workunits/rbd/krbd_namespaces.sh | #!/usr/bin/env bash
set -ex
function get_block_name_prefix() {
rbd info --format=json $1 | python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'])"
}
function do_pwrite() {
local spec=$1
local old_byte=$2
local new_byte=$3
local dev
dev=$(sudo rbd map $spec)
cmp <(... | 4,226 | 35.128205 | 111 | sh |
null | ceph-main/qa/workunits/rbd/krbd_rxbounce.sh | #!/usr/bin/env bash
set -ex
rbd create --size 256 img
IMAGE_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
OBJECT_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_... | 2,500 | 23.048077 | 117 | sh |
null | ceph-main/qa/workunits/rbd/krbd_stable_writes.sh | #!/usr/bin/env bash
set -ex
function assert_dm() {
local name=$1
local val=$2
local devno
devno=$(sudo dmsetup info -c --noheadings -o Major,Minor $name)
grep -q $val /sys/dev/block/$devno/queue/stable_writes
}
function dmsetup_reload() {
local name=$1
local table
table=$(</dev/stdi... | 2,666 | 17.78169 | 67 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_enumerate.sh | #!/usr/bin/env bash
# This is a test for https://tracker.ceph.com/issues/41036, but it also
# triggers https://tracker.ceph.com/issues/41404 in some environments.
set -ex
function assert_exit_codes() {
declare -a pids=($@)
for pid in ${pids[@]}; do
wait $pid
done
}
function run_map() {
decla... | 1,248 | 17.641791 | 71 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh | #!/usr/bin/env bash
# This is a test for https://tracker.ceph.com/issues/41404, verifying that udev
# events are properly reaped while the image is being (un)mapped in the kernel.
# UDEV_BUF_SIZE is 1M (giving us a 2M socket receive buffer), but modprobe +
# modprobe -r generate ~28M worth of "block" events.
set -ex
... | 639 | 24.6 | 79 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_netns.sh | #!/usr/bin/env bash
set -ex
sudo ip netns add ns1
sudo ip link add veth1-ext type veth peer name veth1-int
sudo ip link set veth1-int netns ns1
sudo ip netns exec ns1 ip link set dev lo up
sudo ip netns exec ns1 ip addr add 192.168.1.2/24 dev veth1-int
sudo ip netns exec ns1 ip link set veth1-int up
sudo ip netns ex... | 2,397 | 26.563218 | 76 | sh |
null | ceph-main/qa/workunits/rbd/krbd_udev_symlinks.sh | #!/usr/bin/env bash
set -ex
SPECS=(
rbd/img1
rbd/img2
rbd/img2@snap1
rbd/img3
rbd/img3@snap1
rbd/img3@snap2
rbd/ns1/img1
rbd/ns1/img2
rbd/ns1/img2@snap1
rbd/ns1/img3
rbd/ns1/img3@snap1
rbd/ns1/img3@snap2
rbd/ns2/img1
rbd/ns2/img2
rbd/ns2/img2@snap1
rbd/ns2/img3
rbd/ns2/img3@snap1
rbd/ns2/img3@snap2
custom/img1
custom... | 2,375 | 19.307692 | 82 | sh |
null | ceph-main/qa/workunits/rbd/krbd_wac.sh | #!/usr/bin/env bash
set -ex
wget http://download.ceph.com/qa/wac.c
gcc -o wac wac.c
rbd create --size 300 img
DEV=$(sudo rbd map img)
sudo mkfs.ext4 $DEV
sudo mount $DEV /mnt
set +e
sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
RET=$?
set -e
[[ $RET -eq 124 ]]
sudo killall -w wac || true # wac forks
sudo u... | 751 | 17.341463 | 53 | sh |
null | ceph-main/qa/workunits/rbd/luks-encryption.sh | #!/usr/bin/env bash
set -ex
CEPH_ID=${CEPH_ID:-admin}
TMP_FILES="/tmp/passphrase /tmp/passphrase2 /tmp/testdata1 /tmp/testdata2 /tmp/cmpdata"
_sudo()
{
local cmd
if [ `id -u` -eq 0 ]
then
"$@"
return $?
fi
# Look for the command in the user path. If it fails run it as is,
# supposing it is... | 6,933 | 30.807339 | 190 | sh |
null | ceph-main/qa/workunits/rbd/map-snapshot-io.sh | #!/bin/sh
# http://tracker.ceph.com/issues/3964
set -ex
rbd create image -s 100
DEV=$(sudo rbd map image)
dd if=/dev/zero of=$DEV oflag=direct count=10
rbd snap create image@s1
dd if=/dev/zero of=$DEV oflag=direct count=10 # used to fail
rbd snap rm image@s1
dd if=/dev/zero of=$DEV oflag=direct count=10
sudo rbd u... | 352 | 18.611111 | 62 | sh |
null | ceph-main/qa/workunits/rbd/map-unmap.sh | #!/usr/bin/env bash
set -ex
RUN_TIME=300 # approximate duration of run (seconds)
[ $# -eq 1 ] && RUN_TIME="$1"
IMAGE_NAME="image-$$"
IMAGE_SIZE="1024" # MB
function get_time() {
date '+%s'
}
function times_up() {
local end_time="$1"
test $(get_time) -ge "${end_time}"
}
function map_unmap() {
[ $# -eq 1 ] ||... | 793 | 16.26087 | 58 | sh |
null | ceph-main/qa/workunits/rbd/merge_diff.sh | #!/usr/bin/env bash
set -ex
export RBD_FORCE_ALLOW_V1=1
pool=rbd
gen=$pool/gen
out=$pool/out
testno=1
mkdir -p merge_diff_test
pushd merge_diff_test
function expect_false()
{
if "$@"; then return 1; else return 0; fi
}
function clear_all()
{
fusermount -u mnt || true
rbd snap purge --no-progress $gen || tru... | 7,100 | 13.855649 | 100 | sh |
null | ceph-main/qa/workunits/rbd/notify_master.sh | #!/bin/sh -ex
relpath=$(dirname $0)/../../../src/test/librbd
python3 $relpath/test_notify.py master
exit 0
| 108 | 17.166667 | 46 | sh |
null | ceph-main/qa/workunits/rbd/notify_slave.sh | #!/bin/sh -ex
relpath=$(dirname $0)/../../../src/test/librbd
python3 $relpath/test_notify.py slave
exit 0
| 107 | 17 | 46 | sh |
null | ceph-main/qa/workunits/rbd/permissions.sh | #!/usr/bin/env bash
set -ex
IMAGE_FEATURES="layering,exclusive-lock,object-map,fast-diff"
clone_v2_enabled() {
image_spec=$1
rbd info $image_spec | grep "clone-parent"
}
create_pools() {
ceph osd pool create images 32
rbd pool init images
ceph osd pool create volumes 32
rbd pool init volumes
... | 9,384 | 33.759259 | 108 | sh |
null | ceph-main/qa/workunits/rbd/qemu-iotests.sh | #!/bin/sh -ex
# Run qemu-iotests against rbd. These are block-level tests that go
# through qemu but do not involve running a full vm. Note that these
# require the admin ceph user, as there's no way to pass the ceph user
# to qemu-iotests currently.
testlist='001 002 003 004 005 008 009 010 011 021 025 032 033'
git... | 1,484 | 27.557692 | 71 | sh |
null | ceph-main/qa/workunits/rbd/qemu_dynamic_features.sh | #!/usr/bin/env bash
set -x
if [[ -z "${IMAGE_NAME}" ]]; then
echo image name must be provided
exit 1
fi
is_qemu_running() {
rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
}
wait_for_qemu() {
while ! is_qemu_running ; do
echo "*** Waiting for QEMU"
sleep 30
done
}
wait_for_qemu
rbd feature dis... | 1,067 | 21.723404 | 59 | sh |
null | ceph-main/qa/workunits/rbd/qemu_rebuild_object_map.sh | #!/usr/bin/env bash
set -ex
if [[ -z "${IMAGE_NAME}" ]]; then
echo image name must be provided
exit 1
fi
is_qemu_running() {
rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
}
wait_for_qemu() {
while ! is_qemu_running ; do
echo "*** Waiting for QEMU"
sleep 30
done
}
wait_for_qemu
rbd feature di... | 769 | 19.263158 | 56 | sh |
null | ceph-main/qa/workunits/rbd/qos.sh | #!/bin/sh -ex
POOL=rbd
IMAGE=test$$
IMAGE_SIZE=1G
TOLERANCE_PRCNT=10
rbd_bench() {
local image=$1
local type=$2
local total=$3
local qos_type=$4
local qos_limit=$5
local iops_var_name=$6
local bps_var_name=$7
local timeout=$8
local timeout_cmd=""
if [ -n "${timeout}" ]; then
... | 3,502 | 37.494505 | 90 | sh |
null | ceph-main/qa/workunits/rbd/rbd-ggate.sh | #!/bin/sh -ex
POOL=testrbdggate$$
NS=ns
IMAGE=test
SIZE=64
DATA=
DEV=
if which xmlstarlet > /dev/null 2>&1; then
XMLSTARLET=xmlstarlet
elif which xml > /dev/null 2>&1; then
XMLSTARLET=xml
else
echo "Missing xmlstarlet binary!"
exit 1
fi
if [ `uname -K` -ge 1200078 ] ; then
RBD_GGATE_RESIZE_SUPPORTED=1
fi... | 6,519 | 26.166667 | 96 | sh |
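
The stats columns above can be recomputed from the `code` cell alone. Below is a minimal sketch of the presumed derivation — assuming `file_length` is the character count of `code`, `avg_line_length` the mean characters per line, `max_line_length` the longest single line, and `extension_type` the file suffix; the dataset's exact recipe is not documented here, and the helper name `row_stats` is hypothetical:

```python
import os

def row_stats(path: str) -> dict:
    """Recompute the per-file stats columns for one dataset row.

    Assumption: file_length = total characters, avg_line_length =
    file_length / line count, max_line_length = longest single line.
    The dataset's actual derivation may differ in edge cases
    (e.g. trailing newlines).
    """
    with open(path, encoding="utf-8", errors="replace") as f:
        code = f.read()
    lines = code.splitlines()
    return {
        "repo": None,  # the rows shown above all carry a null repo
        "file": path,
        "code": code,
        "file_length": len(code),
        "avg_line_length": len(code) / len(lines) if lines else 0.0,
        "max_line_length": max(map(len, lines), default=0),
        "extension_type": os.path.splitext(path)[1].lstrip("."),
    }
```

Under this reading, `avg_line_length` is simply `file_length` divided by the line count, which is consistent with the rows above (e.g. 492 / 13.5 ≈ 36 lines for `snaptest-capwb.sh`).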