Dataset columns:

| column          | type    | range / distinct values |
|-----------------|---------|-------------------------|
| repo            | string  | lengths 2 to 152        |
| file            | string  | lengths 15 to 239       |
| code            | string  | lengths 0 to 58.4M      |
| file_length     | int64   | 0 to 58.4M              |
| avg_line_length | float64 | 0 to 1.81M              |
| max_line_length | int64   | 0 to 12.7M              |
| extension_type  | string  | 364 classes             |

Each record below lists the file path, a truncated code excerpt, and a bracketed stats line of the form [extension_type | file_length | avg_line_length | max_line_length].
ceph-main/examples/rgw/java/ceph-s3-upload/src/test/java/org/example/cephs3upload/AppTest.java
package org.example.cephs3upload; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; /** * Unit test for simple App. */ public class AppTest extends TestCase { /** * Create the test case * * @param testName name of the test case */ public...
[java | 652 | 15.74359 | 46]
ceph-main/examples/rgw/lua/elasticsearch_adapter.lua
local elasticsearch = require ("elasticsearch") local json = require ("lunajson") local client = elasticsearch.client{ hosts = { { host = "localhost", port = "9200" } } } local copyfrom = {} if (Request.CopyFrom ~= nil) then copyfrom = { Tenant = Request.CopyFrom.Tenant, Bucket = Req...
[lua | 3,276 | 27.495652 | 64]
ceph-main/examples/rgw/lua/elasticsearch_adapter.md
# Introduction This directory contains an example `elasticsearch_adapter.lua` on how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) to push fields of the RGW requests to [Elasticsearch](https://www.elastic.co/elasticsearch/). ## Elasticsearch Install and run Elasticsearch using docker...
[md | 1,831 | 29.533333 | 204]
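The `elasticsearch_adapter.md` excerpt above describes pushing fields of RGW requests into Elasticsearch. A minimal Python sketch of the same kind of push, assuming the `localhost:9200` endpoint shown in the Lua excerpt and a hypothetical index name `rgw`:

```python
# Sketch only: index one RGW-request-like document into a local
# Elasticsearch. Host/port follow the Lua example; the index name "rgw"
# and the document fields are assumptions.
import json
import urllib.request

doc = {
    "RGWOp": "put_obj",
    "Bucket": "mybucket",
    "Object": "myobject",
    "ContentLength": 512,
}

req = urllib.request.Request(
    "http://localhost:9200/rgw/_doc",
    data=json.dumps(doc).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode())
```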
ceph-main/examples/rgw/lua/nats_adapter.lua
local json = require ("lunajson") local nats = require ("nats") function nats_connect(nats_host, nats_port) local nats_params = { host = nats_host, port = nats_port, } client = nats.connect(nats_params) client:connect() end function toJson(request, eventName, opaq...
[lua | 3,874 | 40.223404 | 115]
ceph-main/examples/rgw/lua/nats_adapter.md
# Introduction This directory contains examples on how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) together with a [NATS Lua client](https://github.com/dawnangel/lua-nats) to add NATS to the list of bucket notifications endpoints. ## NATS To test your setup: * Install [NATS](https:/...
[md | 3,178 | 30.166667 | 250]
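The `nats_adapter.md` excerpt describes adding NATS as a bucket-notifications endpoint via a Lua NATS client. A hedged Python sketch of publishing one notification-style event, assuming the third-party `nats-py` package, the default `nats://localhost:4222` server, and a made-up subject name:

```python
# Sketch only: publish a bucket-notification-style JSON event to NATS.
# Requires nats-py; the server URL, subject, and event fields are assumptions.
import asyncio
import json

import nats

async def main():
    nc = await nats.connect("nats://localhost:4222")
    event = {"eventName": "ObjectCreated:Put",
             "bucket": "mybucket", "key": "myobject"}
    await nc.publish("bucket_notifications", json.dumps(event).encode())
    await nc.drain()

asyncio.run(main())
```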
ceph-main/examples/rgw/lua/prometheus_adapter.lua
local http = require("socket.http") local ltn12 = require("ltn12") local respbody = {} local op = "rgw_other_request_content_length" if (Request.RGWOp == "put_obj") then op = "rgw_put_request_content_length" elseif (Request.RGWOp == "get_obj") then op = "rgw_get_request_content_length" end local field = op .. " " ...
[lua | 685 | 27.583333 | 66]
ceph-main/examples/rgw/lua/prometheus_adapter.md
# Introduction This directory contains an example `prometheus_adapter.lua` on how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) to push metrics from the RGW requests to [Prometheus](https://prometheus.io/), specifically to collect information on object sizes. ## Prometheus As every ...
[md | 1,850 | 29.85 | 204]
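The `prometheus_adapter.lua` excerpt above builds a single text-format sample such as `rgw_put_request_content_length <bytes>` and POSTs it over HTTP. A sketch of an equivalent push in Python, assuming a Pushgateway listening on `localhost:9091` and a job name of `rgw` (both assumptions, since Prometheus itself only pulls):

```python
# Sketch only: push one object-size sample in Prometheus text exposition
# format. Pushgateway address and job name are assumptions.
import urllib.request

metric = "rgw_put_request_content_length 512\n"
req = urllib.request.Request(
    "http://localhost:9091/metrics/job/rgw",
    data=metric.encode(),
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)
```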
ceph-main/examples/rgw/lua/storage_class.lua
local function isempty(input) return input == nil or input == '' end if Request.RGWOp == 'put_obj' then RGWDebugLog("Put_Obj with StorageClass: " .. Request.HTTP.StorageClass ) if (isempty(Request.HTTP.StorageClass)) then if (Request.ContentLength >= 65536) then RGWDebugLog("No StorageClass for Object ...
[lua | 792 | 38.65 | 137]
ceph-main/examples/rgw/lua/storage_class.md
# Introduction This directory contains an example `storage_class.lua` on how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) to read and write the Storage Class field of a put request. ## Usage - following examples based on vstart environment built in ceph/build and commands invoked fro...
[md | 2,307 | 45.16 | 153]
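The `storage_class.lua` excerpt reads the StorageClass of `put_obj` requests and assigns one when the field is empty and the object is at least 65536 bytes. A hedged boto3 sketch that issues such a put against a local RGW so the script has something to act on; the endpoint, credentials, bucket, and key are placeholders for a vstart-style setup:

```python
# Sketch only: upload an object with an explicit StorageClass, the field
# the storage_class.lua example reads and rewrites. All connection details
# below are placeholders.
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:8000",
    aws_access_key_id="ACCESS_KEY",
    aws_secret_access_key="SECRET_KEY",
)
s3.put_object(
    Bucket="mybucket",
    Key="myobject",
    Body=b"x" * 65536,        # at the 64 KiB threshold used in the example
    StorageClass="STANDARD",
)
```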
ceph-main/examples/rgw/lua/config/prometheus.yml
global: scrape_interval: 2s # By default, scrape targets every 15 seconds. # Attach these labels to any time series or alerts when communicating with # external systems (federation, remote storage, Alertmanager). external_labels: monitor: 'codelab-monitor' # A scrape configuration containing exactly one e...
[yml | 669 | 34.263158 | 97]
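With a scrape configuration like the one above in place, the collected samples can be read back through Prometheus's standard HTTP API. A sketch, assuming Prometheus runs on its default port 9090 and the adapter's metric name:

```python
# Sketch only: query a local Prometheus for the adapter's metric via the
# /api/v1/query endpoint. Port and metric name are assumptions.
import json
import urllib.parse
import urllib.request

params = urllib.parse.urlencode({"query": "rgw_put_request_content_length"})
url = f"http://localhost:9090/api/v1/query?{params}"
with urllib.request.urlopen(url) as resp:
    print(json.load(resp)["data"]["result"])
```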
ceph-main/examples/rgw/rgw-cache/nginx-lua-file.lua
local check = ngx.req.get_headers()["AUTHORIZATION"] local uri = ngx.var.request_uri local ngx_re = require "ngx.re" local hdrs = ngx.req.get_headers() --Take all signedheaders names, this for creating the X-Amz-Cache which is necessary to override range header to be able to readahead an object local res, err = ngx_re...
[lua | 1,161 | 42.037037 | 143]
ceph-main/fusetrace/fusetrace_ll.cc
// -*- mode:C++; tab-width:8; c-basic-offset:4; indent-tabs-mode:t -*- // vim: ts=8 sw=4 smarttab /* FUSE: Filesystem in Userspace Copyright (C) 2001-2007 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. gcc -Wall `pkg-conf...
[cc | 28,326 | 22.904641 | 104]
ceph-main/man/conf.py
import os import sys project = u'Ceph' copyright = u'2010-2014, Inktank Storage, Inc. and contributors. Licensed under Creative Commons Attribution Share Alike 3.0 (CC-BY-SA-3.0)' version = 'dev' release = 'dev' exclude_patterns = ['**/.#*', '**/*~'] def _get_description(fname, base): with open(fname) as f: ...
[py | 2,065 | 29.835821 | 140]
ceph-main/mirroring/README.md
# Mirroring Ceph Ceph is primarily distributed from download.ceph.com which is based in the US. However, globally there are multiple mirrors which offer the same content. Often faster than downloading from the primary source. Using the script found in this directory you can easily mirror Ceph to your local datacenter...
[md | 2,161 | 31.268657 | 130]
ceph-main/mirroring/mirror-ceph.sh
#!/usr/bin/env bash set -e # # Script to mirror Ceph locally # # Please, choose a local source and do not sync in a shorter interval than # 3 hours. # SILENT=0 # All available source mirrors declare -A SOURCES SOURCES[eu]="eu.ceph.com" SOURCES[de]="de.ceph.com" SOURCES[se]="se.ceph.com" SOURCES[au]="au.ceph.com" SOURC...
[sh | 2,560 | 24.356436 | 76]
ceph-main/mirroring/test-mirrors.sh
#!/usr/bin/env bash # # Simple script which performs a HTTP and rsync check on # all Ceph mirrors over IPv4 and IPv6 to see if they are online # # Requires IPv4, IPv6, rsync and curl # # Example usage: # - ./test-mirrors.sh eu.ceph.com,de.ceph.com,au.ceph.com # - cat MIRRORS |cut -d ':' -f 1|xargs -n 1 ./test-mirrors.s...
[sh | 1,327 | 17.971429 | 63]
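The `test-mirrors.sh` excerpt checks each Ceph mirror over HTTP and rsync. A Python sketch of just the HTTP half, with mirror hostnames taken from the script's own usage examples:

```python
# Sketch only: HTTP liveness check across a few Ceph mirrors, the same
# check test-mirrors.sh performs with curl.
import urllib.request

MIRRORS = ["eu.ceph.com", "de.ceph.com", "au.ceph.com"]

for host in MIRRORS:
    try:
        with urllib.request.urlopen(f"http://{host}/", timeout=10) as resp:
            print(f"{host}: HTTP {resp.status}")
    except OSError as exc:
        print(f"{host}: DOWN ({exc})")
```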
ceph-main/monitoring/ceph-mixin/README.md
## Prometheus Monitoring Mixin for Ceph A set of Grafana dashboards and Prometheus alerts for Ceph. All the Grafana dashboards are already generated in the `dashboards_out` directory and alerts in the `prometheus_alerts.yml` file. You can use the Grafana dashboards and alerts with Jsonnet like any other prometheus mi...
[md | 2,933 | 37.103896 | 117]
ceph-main/monitoring/ceph-mixin/jsonnet-bundler-build.sh
#!/bin/sh -ex JSONNET_VERSION="v0.4.0" OUTPUT_DIR=${1:-$(pwd)} git clone -b ${JSONNET_VERSION} --depth 1 https://github.com/jsonnet-bundler/jsonnet-bundler make -C jsonnet-bundler build mv jsonnet-bundler/_output/jb ${OUTPUT_DIR}
[sh | 233 | 25 | 92]
ceph-main/monitoring/ceph-mixin/lint-jsonnet.sh
#!/bin/sh -e JSONNETS_FILES=$(find . -name 'vendor' -prune -o \ -name '*.jsonnet' -print -o -name '*.libsonnet' -print) jsonnetfmt "$@" ${JSONNETS_FILES}
[sh | 179 | 29 | 79]
ceph-main/monitoring/ceph-mixin/prometheus_alerts.yml
groups: - name: "cluster health" rules: - alert: "CephHealthError" annotations: description: "The cluster state has been HEALTH_ERROR for more than 5 minutes. Please check 'ceph health detail' for more information." summary: "Ceph is in the ERROR state" expr: "ceph_health...
[yml | 40,452 | 60.760305 | 658]
ceph-main/monitoring/ceph-mixin/test-jsonnet.sh
#!/bin/sh -e TEMPDIR=$(mktemp -d) BASEDIR=$(dirname "$0") jsonnet -J vendor -m ${TEMPDIR} $BASEDIR/dashboards.jsonnet truncate -s 0 ${TEMPDIR}/json_difference.log for file in ${BASEDIR}/dashboards_out/*.json do file_name="$(basename $file)" for generated_file in ${TEMPDIR}/*.json do generated_fil...
[sh | 1,022 | 27.416667 | 80]
ceph-main/monitoring/ceph-mixin/tests_alerts/README.md
## Alert Rule Standards The alert rules should adhere to the following principles - each alert must have a unique name - each alert should define a common structure - labels : must contain severity and type - annotations : must provide description - expr : must define the promql expression - alert : defines ...
[md | 5,399 | 57.064516 | 136]
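The README excerpt above lists structural rules every alert must follow: a unique name, `severity` and `type` labels, a `description` annotation, and a promql `expr`. A hedged sketch of verifying those rules with PyYAML against the repo's `prometheus_alerts.yml`:

```python
# Sketch only: verify the structural rules from the README against a
# Prometheus rules file. The schema keys follow the standard rule format.
import yaml

with open("prometheus_alerts.yml") as f:
    data = yaml.safe_load(f)

seen = set()
for group in data["groups"]:
    for rule in group.get("rules", []):
        name = rule["alert"]
        assert name not in seen, f"duplicate alert name: {name}"
        seen.add(name)
        labels = rule.get("labels", {})
        assert "severity" in labels, f"{name}: missing severity label"
        assert "type" in labels, f"{name}: missing type label"
        assert "description" in rule.get("annotations", {}), \
            f"{name}: missing description annotation"
        assert "expr" in rule, f"{name}: missing expr"
```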
ceph-main/monitoring/ceph-mixin/tests_alerts/__init__.py
[py | 0 | 0 | 0] (empty file)
ceph-main/monitoring/ceph-mixin/tests_alerts/settings.py
import os ALERTS_FILE = '../prometheus_alerts.yml' UNIT_TESTS_FILE = 'test_alerts.yml' MIB_FILE = '../../snmp/CEPH-MIB.txt' current_dir = os.path.dirname(os.path.abspath(__file__)) ALERTS_FILE = os.path.join(current_dir, ALERTS_FILE) UNIT_TESTS_FILE = os.path.join(current_dir, UNIT_TESTS_FILE) MIB_FILE = os.path.joi...
[py | 345 | 27.833333 | 60]
ceph-main/monitoring/ceph-mixin/tests_alerts/test_alerts.yml
rule_files: - ../prometheus_alerts.yml evaluation_interval: 5m tests: # health error - interval: 5m input_series: - series: 'ceph_health_status{instance="ceph:9283",job="ceph"}' values: '2 2 2 2 2 2 2' promql_expr_test: - expr: ceph_health_status == 2 eval_time: 5m exp_samples: ...
[yml | 81,203 | 41.987824 | 595]
ceph-main/monitoring/ceph-mixin/tests_alerts/test_syntax.py
import pytest import os import yaml from .utils import promtool_available, call from .settings import ALERTS_FILE, UNIT_TESTS_FILE def load_yaml(file_name): yaml_data = None with open(file_name, 'r') as alert_file: raw = alert_file.read() try: yaml_data = yaml.safe_load(raw) ...
[py | 1,176 | 26.372093 | 105]
ceph-main/monitoring/ceph-mixin/tests_alerts/test_unittests.py
import pytest import os from .utils import promtool_available, call from .settings import ALERTS_FILE, UNIT_TESTS_FILE def test_alerts_present(): assert os.path.exists(ALERTS_FILE), f"{ALERTS_FILE} not found" def test_unittests_present(): assert os.path.exists(UNIT_TESTS_FILE), f"{UNIT_TESTS_FILE} not found...
[py | 603 | 29.2 | 107]
ceph-main/monitoring/ceph-mixin/tests_alerts/utils.py
import pytest import shutil import subprocess def promtool_available() -> bool: return shutil.which('promtool') is not None def call(cmd): completion = subprocess.run(cmd.split(), stdout=subprocess.PIPE) return completion
[py | 238 | 17.384615 | 68]
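A plausible usage sketch for the two helpers above: gate promtool-backed checks behind `promtool_available()` so the suite degrades gracefully when the binary is missing. The test body is illustrative; `promtool check rules` is promtool's standard rule validator:

```python
# Sketch only: how promtool_available() and call() combine in a test.
import pytest

from .utils import promtool_available, call

@pytest.mark.skipif(not promtool_available(),
                    reason="promtool not installed")
def test_rules_parse():
    completion = call("promtool check rules ../prometheus_alerts.yml")
    assert completion.returncode == 0
```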
ceph-main/monitoring/ceph-mixin/tests_alerts/validate_rules.py
#!/usr/bin/env python3 # # Check the Prometheus rules for format, and integration # with the unit tests. This script has the following exit # codes: # 0 .. Everything worked # 4 .. rule problems or missing unit tests # 8 .. Missing fields in YAML # 12 .. Invalid YAML - unable to load # 16 .. Missing input files # # ...
[py | 18,342 | 31.068182 | 151]
ceph-main/monitoring/ceph-mixin/tests_dashboards/__init__.py
import re import subprocess import sys import tempfile from dataclasses import asdict, dataclass, field from typing import Any, List import yaml from .util import replace_grafana_expr_variables @dataclass class InputSeries: series: str = '' values: str = '' @dataclass class ExprSample: labels: str = ''...
[py | 5,809 | 29.578947 | 91]
ceph-main/monitoring/ceph-mixin/tests_dashboards/util.py
import json import re from pathlib import Path from typing import Any, Dict, Tuple, Union from termcolor import cprint UNITS = ['ms', 's', 'm', 'h', 'd', 'w', 'y'] def resolve_time_and_unit(time: str) -> Union[Tuple[int, str], Tuple[None, None]]: """ Divide time with its unit and return a tuple like (10, 'm...
[py | 4,549 | 40.363636 | 99]
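The truncated docstring above says `resolve_time_and_unit` splits a time string into a value/unit pair such as `(10, 'm')`. A hypothetical reconstruction under that reading (the real body is cut off in the excerpt):

```python
# Sketch only: split "10m" into (10, "m"), or return (None, None) when the
# suffix is not a known unit. Longest units are tried first so "ms" wins
# over "s".
from typing import Tuple, Union

UNITS = ['ms', 's', 'm', 'h', 'd', 'w', 'y']

def resolve_time_and_unit(time: str) -> Union[Tuple[int, str], Tuple[None, None]]:
    for unit in sorted(UNITS, key=len, reverse=True):
        if time.endswith(unit):
            value = time[:-len(unit)]
            if value.isdigit():
                return int(value), unit
    return None, None

assert resolve_time_and_unit('10m') == (10, 'm')
assert resolve_time_and_unit('500ms') == (500, 'ms')
```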
ceph-main/monitoring/ceph-mixin/tests_dashboards/features/__init__.py
[py | 0 | 0 | 0] (empty file)
ceph-main/monitoring/ceph-mixin/tests_dashboards/features/environment.py
# type: ignore[no-redef] # pylint: disable=E0611,W0613,E0102 import copy from behave import given, then, when from prettytable import PrettyTable from tests_dashboards import PromqlTest from tests_dashboards.util import get_dashboards_data, resolve_time_and_unit class GlobalContext: def __init__(self): ...
[py | 4,649 | 33.191176 | 94]
ceph-main/monitoring/ceph-mixin/tests_dashboards/features/steps/__init__.py
# This file and steps files is needed even if its empty because of 'behave' :(
[py | 79 | 39 | 78]
ceph-main/monitoring/grafana/build/README.md
# Building the ceph-grafana container image From Nautilus onwards, grafana is embedded into the mgr/dashboard UI and uses two discrete grafana plugins to provide visualisations within the UI. To better support disconnected installs, and provide a more tested configuration you may use the Makefile, in this directory, to...
[md | 1,744 | 44.921053 | 340]
ceph-main/monitoring/snmp/README.md
# SNMP schema To show the [OID](https://en.wikipedia.org/wiki/Object_identifier)'s supported by the MIB, use the snmptranslate command. Here's an example: ``` snmptranslate -Pu -Tz -M ~/git/ceph/monitoring/snmp:/usr/share/snmp/mibs -m CEPH-MIB ``` *The `snmptranslate` command is in the net-snmp-utils package* The MIB ...
[md | 2,746 | 48.945455 | 140]
ceph-main/qa/find-used-ports.sh
#!/bin/bash git --no-pager grep -n '127.0.0.1:[0-9]\+' | sed -n 's/.*127.0.0.1:\([0-9]\+\).*/\1/p' | sort -n | uniq -u
[sh | 120 | 29.25 | 106]
ceph-main/qa/loopall.sh
#!/usr/bin/env bash set -ex basedir=`echo $0 | sed 's/[^/]*$//g'`. testdir="$1" [ -n "$2" ] && logdir=$2 || logdir=$1 [ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1} PATH="$basedir/src:$PATH" [ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1 cd $testdir while true do for te...
[sh | 689 | 22.793103 | 102]
ceph-main/qa/run-standalone.sh
#!/usr/bin/env bash set -e if [ ! -e CMakeCache.txt -o ! -d bin ]; then echo 'run this from the build dir' exit 1 fi function get_cmake_variable() { local variable=$1 grep "$variable" CMakeCache.txt | cut -d "=" -f 2 } function get_python_path() { python_common=$(realpath ../src/python-common) ...
[sh | 3,517 | 23.774648 | 85]
ceph-main/qa/run_xfstests-obsolete.sh
#!/usr/bin/env bash # Copyright (C) 2012 Dreamhost, LLC # # This is free software; see the source for copying conditions. # There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. # # This is free software; you can redistribute it and/or modify it # under the terms of the GNU General ...
[sh | 12,288 | 25.77342 | 79]
ceph-main/qa/run_xfstests.sh
#!/usr/bin/env bash # Copyright (C) 2012 Dreamhost, LLC # # This is free software; see the source for copying conditions. # There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. # # This is free software; you can redistribute it and/or modify it # under the terms of the GNU General ...
[sh | 8,000 | 23.694444 | 78]
ceph-main/qa/run_xfstests_qemu.sh
#!/usr/bin/env bash # # TODO switch to run_xfstests.sh (see run_xfstests_krbd.sh) set -x [ -n "${TESTDIR}" ] || export TESTDIR="/tmp/cephtest" [ -d "${TESTDIR}" ] || mkdir "${TESTDIR}" URL_BASE="https://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa" SCRIPT="run_xfstests-obsolete.sh" cd "${TESTDIR}" curl -O "${URL_BAS...
[sh | 1,050 | 23.44186 | 73]
ceph-main/qa/runallonce.sh
#!/usr/bin/env bash set -ex basedir=`echo $0 | sed 's/[^/]*$//g'`. testdir="$1" [ -n "$2" ] && logdir=$2 || logdir=$1 [ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1} PATH="$basedir/src:$PATH" [ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1 cd $testdir for test in `cd $basedir...
[sh | 665 | 24.615385 | 98]
ceph-main/qa/runoncfuse.sh
#!/usr/bin/env bash set -x mkdir -p testspace ceph-fuse testspace -m $1 ./runallonce.sh testspace killall ceph-fuse
[sh | 118 | 12.222222 | 25]
ceph-main/qa/runonkclient.sh
#!/usr/bin/env bash set -x mkdir -p testspace /bin/mount -t ceph $1 testspace ./runallonce.sh testspace /bin/umount testspace
[sh | 129 | 12 | 31]
ceph-main/qa/setup-chroot.sh
#!/usr/bin/env bash die() { echo ${@} exit 1 } usage() { cat << EOF $0: sets up a chroot environment for building the ceph server usage: -h Show this message -r [install_dir] location of the root filesystem to install to example: -r /images/...
[sh | 1,636 | 23.80303 | 82]
ceph-main/qa/test_import.py
# try to import all .py files from a given directory import glob import os import importlib import importlib.util import pytest def _module_name(path): task = os.path.splitext(path)[0] parts = task.split(os.path.sep) package = parts[0] name = ''.join('.' + c for c in parts[1:]) return package, nam...
[py | 1,001 | 24.692308 | 61]
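`test_import.py` walks a directory and tries to import every `.py` file it finds. A minimal sketch of the underlying mechanism, importing one file by path with `importlib`; the path in the comment is a placeholder:

```python
# Sketch only: import a single .py file by path, the mechanism an
# import-everything smoke test builds on.
import importlib.util

def import_from_path(path: str):
    spec = importlib.util.spec_from_file_location("candidate", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # raises if the module fails to import
    return module

# Example (placeholder path):
# mod = import_from_path("qa/tasks/example.py")
```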
ceph-main/qa/archs/aarch64.yaml
arch: aarch64
[yaml | 14 | 6.5 | 13]
ceph-main/qa/archs/armv7.yaml
arch: armv7l
[yaml | 13 | 6 | 12]
ceph-main/qa/archs/i686.yaml
arch: i686
[yaml | 11 | 5 | 10]
ceph-main/qa/archs/x86_64.yaml
arch: x86_64
[yaml | 13 | 6 | 12]
ceph-main/qa/btrfs/clone_range.c
#include <fcntl.h> #include <stdlib.h> #include <sys/ioctl.h> #include <string.h> #include <linux/types.h> #include "../../src/os/btrfs_ioctl.h" #include <stdio.h> #include <errno.h> int main(int argc, char **argv) { struct btrfs_ioctl_clone_range_args ca; int dfd; int r; if (argc < 6) { p...
[c | 919 | 24.555556 | 84]
ceph-main/qa/btrfs/create_async_snap.c
#include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <sys/ioctl.h> #include <string.h> #include <linux/ioctl.h> #include <linux/types.h> #include "../../src/os/btrfs_ioctl.h" struct btrfs_ioctl_vol_args_v2 va; int main(...
[c | 757 | 20.657143 | 66]
ceph-main/qa/btrfs/test_async_snap.c
#include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <sys/ioctl.h> #include <string.h> #include <linux/ioctl.h> #include <linux/types.h> #include "../../src/os/btrfs_ioctl.h" struct btrfs_ioctl_vol_args_v2 va; struct btr...
[c | 2,148 | 24.583333 | 75]
ceph-main/qa/btrfs/test_rmdir_async_snap.c
#include <stdlib.h> #include <unistd.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <sys/ioctl.h> #include <string.h> #include <linux/ioctl.h> #include <linux/types.h> #include "../../src/os/btrfs_ioctl.h" struct btrfs_ioctl_vol_args_v2 va; struct btr...
[c | 1,373 | 20.809524 | 72]
ceph-main/qa/cephfs/unshare_ns_mount.sh
#!/usr/bin/env bash # This is one helper for mounting the ceph-fuse/kernel clients by # unsharing the network namespace, let's call it netns container. # With the netns container, you can easily suspend or resume the # virtual network interface to simulate the client node hard # shutdown for some test cases. # # ...
[sh | 17,928 | 29.132773 | 105]
ceph-main/qa/cephfs/begin/0-install.yaml
tasks: - install: extra_packages: rpm: - python3-cephfs - cephfs-top - cephfs-mirror deb: - python3-cephfs - cephfs-shell - cephfs-top - cephfs-mirror # For kernel_untar_build workunit extra_system_packages: deb: ...
[yaml | 1,408 | 20.348485 | 39]
ceph-main/qa/cephfs/begin/1-ceph.yaml
log-rotate: ceph-mds: 10G ceph-osd: 10G tasks: - ceph:
[yaml | 61 | 9.333333 | 15]
ceph-main/qa/cephfs/begin/2-logrotate.yaml
log-rotate: ceph-mds: 10G ceph-osd: 10G
[yaml | 44 | 10.25 | 15]
ceph-main/qa/cephfs/clusters/1-mds-1-client-coloc.yaml
roles: - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 239 | 23 | 61]
ceph-main/qa/cephfs/clusters/1-mds-1-client-micro.yaml
roles: - [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3] - [client.0] openstack: - volumes: # attached to each instance count: 4 size: 10 # GB
[yaml | 167 | 20 | 65]
ceph-main/qa/cephfs/clusters/1-mds-1-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7] - [client.0] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 249 | 21.727273 | 58]
ceph-main/qa/cephfs/clusters/1-mds-2-client-coloc.yaml
roles: - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7, client.1] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 249 | 24 | 68]
ceph-main/qa/cephfs/clusters/1-mds-2-client-micro.yaml
roles: - [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.a, mds.b, mds.c, osd.0, osd.1, osd.2, osd.3] - [client.0] - [client.1] openstack: - volumes: # attached to each instance count: 4 size: 10 # GB
[yaml | 201 | 21.444444 | 86]
ceph-main/qa/cephfs/clusters/1-mds-2-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7] - [client.0] - [client.1] openstack: - volumes: # attached to each instance count: 4 size: 30 # GB - machine: disk: 200 # GB
[yaml | 262 | 20.916667 | 58]
ceph-main/qa/cephfs/clusters/1-mds-3-client.yaml
roles: - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7] - [client.0] - [client.1] - [client.2] openstack: - volumes: # attached to each instance count: 4 size: 30 # GB - machine: disk: 200 # GB
[yaml | 268 | 19.692308 | 58]
ceph-main/qa/cephfs/clusters/1-mds-4-client-coloc.yaml
roles: - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0, client.1] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7, client.2, client.3] openstack: - volumes: # attached to each instance count: 4 size: 30 # GB - machine: disk: 200 # GB
[yaml | 269 | 26 | 78]
ceph-main/qa/cephfs/clusters/1-mds-4-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.b, osd.0, osd.1, osd.2, osd.3] - [mon.b, mon.c, mgr.x, mds.c, osd.4, osd.5, osd.6, osd.7] - [client.0] - [client.1] - [client.2] - [client.3] openstack: - volumes: # attached to each instance count: 4 size: 30 # GB - machine: disk: 200 # GB
[yaml | 288 | 19.642857 | 58]
ceph-main/qa/cephfs/clusters/1-node-1-mds-1-osd.yaml
roles: - [mon.a, mgr.x, mds.a, osd.0, client.0] openstack: - volumes: # attached to each instance count: 1 size: 5 # GB - machine: disk: 10 # GB
[yaml | 157 | 16.555556 | 40]
ceph-main/qa/cephfs/clusters/1a11s-mds-1c-client-3node.yaml
roles: - [mon.a, mgr.x, mds.a, mds.d, mds.g, mds.j, osd.0, osd.3, osd.6, osd.9, client.0] - [mon.b, mgr.y, mds.b, mds.e, mds.h, mds.k, osd.1, osd.4, osd.7, osd.10] - [mon.c, mgr.z, mds.c, mds.f, mds.i, mds.l, osd.2, osd.5, osd.8, osd.11] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB -...
[yaml | 349 | 30.818182 | 82]
ceph-main/qa/cephfs/clusters/1a2s-mds-1c-client-3node.yaml
roles: - [mon.a, mgr.x, mds.a, osd.0, osd.3, osd.6, osd.9, client.0] - [mon.b, mgr.y, mds.b, osd.1, osd.4, osd.7, osd.10] - [mon.c, mgr.z, mds.c, osd.2, osd.5, osd.8, osd.11] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 286 | 25.090909 | 61]
ceph-main/qa/cephfs/clusters/1a3s-mds-1c-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 253 | 24.4 | 68]
ceph-main/qa/cephfs/clusters/1a3s-mds-2c-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7, client.1] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 263 | 25.4 | 75]
ceph-main/qa/cephfs/clusters/1a3s-mds-4c-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0, client.2] - [mon.b, mon.c, mgr.x, mds.b, mds.d, osd.4, osd.5, osd.6, osd.7, client.1, client.3] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 283 | 27.4 | 85]
ceph-main/qa/cephfs/clusters/1a5s-mds-1c-client-3node.yaml
roles: - [mon.a, mgr.x, mds.a, mds.d, osd.0, osd.3, osd.6, osd.9, client.0] - [mon.b, mgr.y, mds.b, mds.e, osd.1, osd.4, osd.7, osd.10] - [mon.c, mgr.z, mds.c, mds.f, osd.2, osd.5, osd.8, osd.11] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 307 | 27 | 68]
ceph-main/qa/cephfs/clusters/1a5s-mds-1c-client.yaml
roles: - [mon.a, mgr.y, mds.a, mds.c, mds.e, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mon.c, mgr.x, mds.b, mds.d, mds.f, osd.4, osd.5, osd.6, osd.7] openstack: - volumes: # attached to each instance count: 4 size: 20 # GB - machine: disk: 200 # GB
[yaml | 267 | 25.8 | 75]
ceph-main/qa/cephfs/clusters/3-mds.yaml
roles: - [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3] - [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7] - [client.0, client.1] overrides: ceph: cephfs: max_mds: 3 openstack: - volumes: # attached to each instance count: 4 size: 30 # GB - machine: disk: 200 # GB
[yaml | 307 | 19.533333 | 58]
ceph-main/qa/cephfs/clusters/9-mds.yaml
roles: - [mon.a, mon.c, mgr.y, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2, osd.3] - [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.4, osd.5, osd.6, osd.7] - [client.0, client.1] overrides: ceph: cephfs: max_mds: 9 openstack: - volumes: # attached to each instance count: 4 size: 30 # GB -...
[yaml | 349 | 22.333333 | 79]
ceph-main/qa/cephfs/clusters/fixed-2-ucephfs.yaml
roles: - [mon.a, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3, client.0] - [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7] openstack: - volumes: # attached to each instance count: 4 size: 30 # GB - machine: disk: 200 # GB
[yaml | 239 | 23 | 61]
ceph-main/qa/cephfs/conf/client.yaml
overrides: ceph: conf: client: client mount timeout: 600 debug ms: 1 debug client: 20 rados mon op timeout: 900 rados osd op timeout: 900
[yaml | 190 | 18.1 | 33]
ceph-main/qa/cephfs/conf/mds.yaml
overrides: ceph: conf: mds: debug mds: 20 debug mds balancer: 20 debug ms: 1 mds debug frag: true mds debug scatterstat: true mds op complaint time: 180 mds verify scatter: true osd op complaint time: 180 rados mon op timeout: 900 ...
[yaml | 349 | 22.333333 | 35]
ceph-main/qa/cephfs/conf/mon.yaml
overrides: ceph: conf: mon: mon op complaint time: 120
[yaml | 75 | 11.666667 | 34]
ceph-main/qa/cephfs/conf/osd.yaml
overrides: ceph: conf: osd: osd op complaint time: 180
[yaml | 75 | 11.666667 | 34]
ceph-main/qa/cephfs/mount/fuse.yaml
teuthology: postmerge: - local function is_kupstream() return false end - local function is_kdistro() return false end - local function is_fuse() return true end - local function syntax_version() return '' end tasks: - ceph-fuse:
[yaml | 282 | 15.647059 | 35]
ceph-main/qa/cephfs/mount/kclient/mount.yaml
teuthology: postmerge: - local function is_kupstream() return yaml.ktype == 'upstream' end - local function is_kdistro() return yaml.ktype == 'distro' end - local function is_fuse() return false end - local function syntax_version() return yaml.overrides.kclient.syntax ...
[yaml | 342 | 19.176471 | 42]
ceph-main/qa/cephfs/mount/kclient/mount-syntax/v1.yaml
overrides: kclient: syntax: 'v1'
[yaml | 41 | 9.5 | 18]
ceph-main/qa/cephfs/mount/kclient/mount-syntax/v2.yaml
overrides: kclient: syntax: 'v2'
[yaml | 41 | 9.5 | 18]
ceph-main/qa/cephfs/mount/kclient/overrides/ms-die-on-skipped.yaml
overrides: ceph: conf: global: ms die on skipped message: false
[yaml | 84 | 13.166667 | 40]
ceph-main/qa/cephfs/mount/kclient/overrides/distro/stock/k-stock.yaml
kernel: client: sha1: distro ktype: distro
[yaml | 49 | 9 | 16]
ceph-main/qa/cephfs/mount/kclient/overrides/distro/stock/rhel_8.yaml
.qa/distros/all/rhel_8.yaml
[yaml | 27 | 27 | 27]
ceph-main/qa/cephfs/mount/kclient/overrides/distro/testing/k-testing.yaml
kernel: client: branch: testing ktype: upstream
[yaml | 54 | 10 | 19]
ceph-main/qa/cephfs/objectstore-ec/bluestore-bitmap.yaml
../../objectstore/bluestore-bitmap.yaml
[yaml | 39 | 39 | 39]
ceph-main/qa/cephfs/objectstore-ec/bluestore-comp-ec-root.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs cephfs: ec_profile: - m=2 - k=2 - crush-failure-domain=osd conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluesto...
[yaml | 818 | 26.3 | 90]
ceph-main/qa/cephfs/objectstore-ec/bluestore-comp.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluestore: 20 debug bluefs: 20 debug rocksdb: 10 bluestore compression mode:...
[yaml | 724 | 29.208333 | 90]
ceph-main/qa/cephfs/objectstore-ec/bluestore-ec-root.yaml
overrides: thrashosds: bdev_inject_crash: 2 bdev_inject_crash_probability: .5 ceph: fs: xfs cephfs: ec_profile: - m=2 - k=2 - crush-failure-domain=osd conf: osd: osd objectstore: bluestore bluestore block size: 96636764160 debug bluesto...
[yaml | 1,250 | 27.431818 | 90]
ceph-main/qa/cephfs/overrides/frag.yaml
overrides: ceph: conf: mds: mds bal fragment size max: 10000 mds bal merge size: 5 mds bal split bits: 3 mds bal split size: 100
[yaml | 174 | 16.5 | 40]
ceph-main/qa/cephfs/overrides/ignorelist_health.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(FS_DEGRADED\) - \(MDS_FAILED\) - \(MDS_DEGRADED\) - \(FS_WITH_FAILED_MDS\) - \(MDS_DAMAGE\) - \(MDS_ALL_DOWN\) - \(MDS_UP_LESS_THAN_MAX\) - \(FS_INLINE_DATA_DEPRECATED\)
[yaml | 285 | 21 | 37]
ceph-main/qa/cephfs/overrides/ignorelist_wrongly_marked_down.yaml
overrides: ceph: log-ignorelist: - overall HEALTH_ - \(OSD_DOWN\) - \(OSD_ - but it is still running # MDS daemon 'b' is not responding, replacing it as rank 0 with standby 'a' - is not responding
[yaml | 233 | 22.4 | 75]
ceph-main/qa/cephfs/overrides/osd-asserts.yaml
overrides: ceph: conf: osd: osd shutdown pgref assert: true
[yaml | 80 | 12.5 | 39]
ceph-main/qa/cephfs/overrides/session_timeout.yaml
overrides: ceph: cephfs: session_timeout: 300
58
10.8
26
yaml