Search is not available for this dataset
text
stringlengths
75
104k
def trun_exit(trun):
    """
    Triggers when exiting the given testrun: runs the EXIT-hooks in reverse
    registration order, stopping at the first failing hook.

    @returns 0 on success, the failing hook's rcode otherwise
    """

    if trun["conf"]["VERBOSE"]:
        # BUGFIX: was "rnr:trun:exit" (single colon) -- aligned with the
        # "rnr:trun::" tag style used by the matching enter/exit messages
        cij.emph("rnr:trun::exit")

    rcode = 0
    for hook in reversed(trun["hooks"]["exit"]):    # EXIT-hooks
        rcode = script_run(trun, hook)
        if rcode:
            break

    if trun["conf"]["VERBOSE"]:
        cij.emph("rnr:trun::exit { rcode: %r }" % rcode, rcode)

    return rcode
def trun_enter(trun):
    """Triggers when entering the given testrun"""

    verbose = trun["conf"]["VERBOSE"]
    if verbose:
        cij.emph("rnr:trun::enter")

    trun["stamp"]["begin"] = int(time.time())   # Record start timestamp

    rcode = 0
    for hook in trun["hooks"]["enter"]:         # ENTER-hooks
        rcode = script_run(trun, hook)
        if rcode:
            break

    if verbose:
        cij.emph("rnr:trun::enter { rcode: %r }" % rcode, rcode)

    return rcode
def trun_setup(conf):
    """
    Setup the testrunner data-structure, embedding the parsed environment
    variables and command-line arguments and continues with setup for
    testplans, testsuites, and testcases

    @returns the populated 'trun' dict on success, None on failure
    """

    declr = None
    try:
        with open(conf["TESTPLAN_FPATH"]) as declr_fd:
            declr = yaml.safe_load(declr_fd)
    except (AttributeError, OSError) as exc:
        # BUGFIX: a missing/unreadable testplan raises OSError, which the
        # original AttributeError-only handler never caught
        cij.err("rnr: %r" % exc)

    if not declr:
        return None

    trun = copy.deepcopy(TRUN)
    trun["ver"] = cij.VERSION
    trun["conf"] = copy.deepcopy(conf)
    trun["res_root"] = conf["OUTPUT"]
    trun["aux_root"] = os.sep.join([trun["res_root"], "_aux"])
    trun["evars"].update(copy.deepcopy(declr.get("evars", {})))

    os.makedirs(trun["aux_root"])

    # The 'lock' hook must always run, and must run first
    hook_names = declr.get("hooks", [])
    if "lock" not in hook_names:
        hook_names = ["lock"] + hook_names
    if hook_names[0] != "lock":
        return None

    trun["hooks"] = hooks_setup(trun, trun, hook_names)     # Setup top-level hooks

    for enum, declr in enumerate(declr["testsuites"]):      # Setup testsuites
        tsuite = tsuite_setup(trun, declr, enum)
        if tsuite is None:
            cij.err("main::FAILED: setting up tsuite: %r" % tsuite)
            # BUGFIX: was 'return 1' -- 1 is truthy, so callers testing
            # 'if not trun' mistook the failure for a valid trun
            return None

        trun["testsuites"].append(tsuite)
        trun["progress"]["UNKN"] += len(tsuite["testcases"])

    return trun
def main(conf):
    """CIJ Test Runner main entry point"""

    fpath = yml_fpath(conf["OUTPUT"])
    if os.path.exists(fpath):   # YAML exists, we exit, it might be RUNNING!
        cij.err("main:FAILED { fpath: %r }, exists" % fpath)
        return 1

    trun = trun_setup(conf)     # Construct 'trun' from 'conf'
    if not trun:
        return 1

    trun_to_file(trun)          # Persist trun
    trun_emph(trun)             # Print trun before run

    tr_err = 0
    tr_ent_err = trun_enter(trun)
    # The generator filters turn the loops into no-ops when the corresponding
    # enter-hook failed, while still letting the exit/accounting code below run
    for tsuite in (ts for ts in trun["testsuites"] if not tr_ent_err):

        ts_err = 0
        ts_ent_err = tsuite_enter(trun, tsuite)
        for tcase in (tc for tc in tsuite["testcases"] if not ts_ent_err):
            tc_err = tcase_enter(trun, tsuite, tcase)
            if not tc_err:
                tc_err += script_run(trun, tcase)
                tc_err += tcase_exit(trun, tsuite, tcase)

            tcase["status"] = "FAIL" if tc_err else "PASS"
            trun["progress"][tcase["status"]] += 1  # Update progress
            trun["progress"]["UNKN"] -= 1

            ts_err += tc_err    # Accumulate errors

            trun_to_file(trun)  # Persist trun

        if not ts_ent_err:
            ts_err += tsuite_exit(trun, tsuite)

        ts_err += ts_ent_err    # Accumulate errors
        tr_err += ts_err

        tsuite["status"] = "FAIL" if ts_err else "PASS"
        cij.emph("rnr:tsuite %r" % tsuite["status"], tsuite["status"] != "PASS")

    # NOTE(review): trun_exit()'s rcode is discarded here, so a failing
    # top-level EXIT-hook does not affect trun["status"] -- confirm intended
    if not tr_ent_err:
        trun_exit(trun)
    tr_err += tr_ent_err
    trun["status"] = "FAIL" if tr_err else "PASS"

    trun["stamp"]["end"] = int(time.time()) + 1     # END STAMP
    trun_to_file(trun)                              # PERSIST

    cij.emph("rnr:main:progress %r" % trun["progress"])
    cij.emph("rnr:main:trun %r" % trun["status"], trun["status"] != "PASS")

    # Non-zero when any testcase failed or never ran
    return trun["progress"]["UNKN"] + trun["progress"]["FAIL"]
def get_chunk_meta(self, meta_file):
    """Get chunk meta table"""

    num_chunks = self.envs["CHUNKS"]
    nbytes = num_chunks * self.envs["CHUNK_META_SIZEOF"]

    if cij.nvme.get_meta(0, nbytes, meta_file):
        raise RuntimeError("cij.liblight.get_chunk_meta: fail")

    table = cij.bin.Buffer(types=self.envs["CHUNK_META_STRUCT"], length=num_chunks)
    table.read(meta_file)
    return table
def get_chunk_meta_item(self, chunk_meta, grp, pug, chk):
    """Get item of chunk meta table"""

    chunks_per_punit = self.envs["NUM_CHK"]
    punits_per_group = self.envs["NUM_PU"]

    # Row-major layout: group, then parallel unit, then chunk
    offset = (grp * punits_per_group + pug) * chunks_per_punit + chk
    return chunk_meta[offset]
def is_bad_chunk(self, chunk_meta, grp, pug, chk):
    """Check the chunk is offline or not"""

    meta = self.get_chunk_meta_item(chunk_meta, grp, pug, chk)
    # Bit 3 of the chunk-state field marks the chunk offline
    return bool(meta.CS & 0x8)
def s20_to_gen(self, pugrp, punit, chunk, sectr):
    """S20 unit to generic address"""

    addr = "%d %d %d %d" % (pugrp, punit, chunk, sectr)
    cmd = ["nvm_addr s20_to_gen", self.envs["DEV_PATH"], addr]

    status, stdout, _ = cij.ssh.command(cmd, shell=True)
    if status:
        raise RuntimeError("cij.liblight.s20_to_gen: cmd fail")

    return int(re.findall(r"val: ([0-9a-fx]+)", stdout)[0], 16)
def gen_to_dev(self, address):
    """Generic address to device address"""

    cmd = ["nvm_addr gen2dev", self.envs["DEV_PATH"], "0x{:x}".format(address)]

    status, stdout, _ = cij.ssh.command(cmd, shell=True)
    if status:
        raise RuntimeError("cij.liblight.gen_to_dev: cmd fail")

    return int(re.findall(r"dev: ([0-9a-fx]+)", stdout)[0], 16)
def vblk_write(self, address, meta=False):
    """nvm_vblk write"""

    cmd = ["NVM_CLI_META_MODE=1"] if meta else []
    cmd += ["nvm_vblk write", self.envs["DEV_PATH"], "0x%x" % address]

    status, _, _ = cij.ssh.command(cmd, shell=True)
    return status
def vector_read(self, address_list, file_name=None):
    """nvm_cmd read"""

    addresses = " ".join("0x{:x}".format(addr) for addr in address_list)
    cmd = ["nvm_cmd read", self.envs["DEV_PATH"], addresses]
    if file_name:
        cmd.append("-o {}".format(file_name))

    status, _, _ = cij.ssh.command(cmd, shell=True)
    return status
def scalar_write(self, address, block_count, data_file, meta_file):
    """nvme write"""

    data_bytes = block_count * self.envs["NBYTES"]
    oob_bytes = block_count * self.envs["NBYTES_OOB"]

    cmd = [
        "nvme", "write", self.envs["DEV_PATH"],
        "-s 0x{:x}".format(address),
        "-c {}".format(block_count - 1),    # NVMe: 0's based block count
        "-d {}".format(data_file),
        "-M {}".format(meta_file),
        "-z 0x{:x}".format(data_bytes),
        "-y 0x{:x}".format(oob_bytes),
    ]

    status, _, _ = cij.ssh.command(cmd, shell=True)
    return status
def __run(self, shell=True, echo=True):
    """Run DMESG job"""
    # Bail out if the SSH environment is not usable
    if env():
        return 1
    cij.emph("cij.dmesg.start: shell: %r, cmd: %r" % (shell, self.__prefix + self.__suffix))
    # __prefix/__suffix presumably hold the dmesg command and its trailing
    # arguments/redirection set up elsewhere in this class -- forwarded to
    # the remote side via SSH
    return cij.ssh.command(self.__prefix, shell, echo, self.__suffix)
def start(self):
    """Start DMESG job in thread"""

    self.__thread = Thread(target=self.__run, args=(True, False))
    # setDaemon() is deprecated (Python 3.10+); assign the attribute instead
    self.__thread.daemon = True
    self.__thread.start()
def terminate(self):
    """Terminate DMESG job"""
    if self.__thread:
        # Identify the controlling tty of the current session; used below to
        # narrow the pkill match to the job started from this login
        cmd = ["who am i"]
        status, output, _ = cij.util.execute(cmd, shell=True, echo=True)
        if status:
            cij.warn("cij.dmesg.terminate: who am i failed")
            return 1
        tty = output.split()[1]     # second field of 'who am i' is the tty
        cmd = ["pkill -f '{}' -t '{}'".format(" ".join(self.__prefix), tty)]
        status, _, _ = cij.util.execute(cmd, shell=True, echo=True)
        if status:
            cij.warn("cij.dmesg.terminate: pkill failed")
            return 1
        # The remote process is gone; reap the worker thread
        self.__thread.join()
        self.__thread = None
    return 0
def generate_rt_pic(process_data, para_meter, scale):
    """
    Generate the rate picture: one line per key of 'process_data', where each
    value is a 2-column array of (time, rate) samples.

    @returns the path of the written PNG
    """
    pic_path = para_meter['filename'] + '.png'
    plt.figure(figsize=(5.6 * scale, 3.2 * scale))
    for key in process_data.keys():
        plt.plot(process_data[key][:, 0], process_data[key][:, 1], label=str(key))
    plt.title(para_meter['title'])
    plt.xlabel(para_meter['x_axis_name'])
    plt.ylabel(para_meter['y_axis_name'])
    plt.legend(loc='upper left')
    plt.savefig(pic_path)
    # BUGFIX: close the figure -- pyplot keeps figures alive until closed,
    # leaking memory when many pictures are generated in one run
    plt.close()
    return pic_path
def generate_steady_rt_pic(process_data, para_meter, scale, steady_time):
    """
    Generate the steady-state rate picture: scatter of the last 'steady_time'
    samples per key, with horizontal guides at the mean and +/-5%% / +/-10%%.

    @returns the path of the written PNG
    """
    pic_path_steady = para_meter['filename'] + '_steady.png'
    plt.figure(figsize=(4 * scale, 2.5 * scale))
    for key in process_data.keys():
        # Clamp the window to the available number of samples
        if len(process_data[key]) < steady_time:
            steady_time = len(process_data[key])
        tail_x = process_data[key][-1 * steady_time:, 0]
        tail_y = process_data[key][-1 * steady_time:, 1]
        plt.scatter(tail_x, tail_y, label=str(key), s=10)
        steady_value = np.mean(tail_y)
        steady_value_5 = steady_value * (1 + 0.05)
        steady_value_10 = steady_value * (1 + 0.1)
        steady_value_ng_5 = steady_value * (1 - 0.05)
        steady_value_ng_10 = steady_value * (1 - 0.1)
        plt.plot(tail_x, [steady_value] * steady_time, 'b')
        plt.plot(tail_x, [steady_value_5] * steady_time, 'g')
        plt.plot(tail_x, [steady_value_ng_5] * steady_time, 'g')
        plt.plot(tail_x, [steady_value_10] * steady_time, 'r')
        plt.plot(tail_x, [steady_value_ng_10] * steady_time, 'r')
    plt.title(para_meter['title'] + '(steady)')
    plt.xlabel(para_meter['x_axis_name'] + '(steady)')
    plt.ylabel(para_meter['y_axis_name'] + '(steady)')
    plt.legend(loc='upper left')
    plt.savefig(pic_path_steady)
    # BUGFIX: close the figure -- pyplot keeps figures alive until closed,
    # leaking memory when many pictures are generated in one run
    plt.close()
    return pic_path_steady
def process_rt_data(source_data, is_bw=False):
    """ process data"""
    print("source_data length:", len(source_data))
    result = {}
    # Column 2 selects the series (0 or 1); keep only series that have data
    for series in range(2):
        mask = source_data[:, 2] == series
        if not np.any(mask):
            continue
        result[series] = sum_data(round_data(source_data[mask]), is_bw)
    return result
def sum_data(filter_data, is_bw):
    """ caculate sum"""
    # Find the per-job row count: the time column (col 0) restarts/decreases
    # at each job boundary; the for-else covers the single-job case where no
    # decrease is ever seen
    for index in range(len(filter_data) - 1):
        if filter_data[index][0] > filter_data[index + 1][0]:
            max_index = index + 1
            break
    else:
        max_index = len(filter_data)
    print("max_index: ", max_index + 1)
    num_jobs = int(round(len(filter_data) * 1.0 / max_index))
    print("num_jobs: ", num_jobs)
    # Count how many rows exist per timestamp (timestamps are in ms,
    # sampled every 1000 ms)
    dict_time = Counter(filter_data[:, 0])
    list_sum = []
    for time_index in range(1, max_index + 1):
        # Skip time points where not every job reported a sample
        if dict_time.get(time_index * 1000, 0) != num_jobs:
            print("[WARNING] Time %d, number of data %d != num_jobs %d" % (
                time_index * 1000, dict_time.get(time_index * 1000, 0), num_jobs
            ))
            continue
        filter_mask = (filter_data[:, 0] == time_index * 1000)
        sum_rst = np.sum(filter_data[filter_mask][:, 1])
        if is_bw:
            sum_rst = sum_rst / 1024    # scale bandwidth by 1024 (presumably KiB -> MiB)
        list_sum.append([time_index, sum_rst])
    # 2-column array of (time in seconds, summed value across jobs)
    return np.array(list_sum)
def round_data(filter_data):
    """ round the data"""
    # Snap each timestamp (column 0) to the nearest multiple of 100, in place
    for row in filter_data:
        row[0] = round(row[0] / 100.0) * 100.0
    return filter_data
def import_source(self, sheet, source, delimiter=","): """ Function: Save original data into specific sheet, and try to translate data to float type Input: sheet: Must be a non exists sheet source: File path of source """ # check input parameters if ' ' in sheet: raise RuntimeError("Error sheet name: %s" % sheet) if not source.endswith("txt") and not source.endswith("csv"): raise RuntimeError("Error source name: %s" % source) self.source_sheet = sheet source_data = np.loadtxt(source, dtype=str, delimiter=delimiter) self.source_data = {"title": source_data[0].tolist(), "data": source_data[1:]} cell_format_title = self.workbook.add_format({'bold': True, 'font_name': u'等线', 'bg_color': '#c5d9f1', 'rotation': 45}) cell_format = self.workbook.add_format({'bold': False, 'font_name': u'等线', 'num_format': 0}) worksheet = self.workbook.add_worksheet(sheet) worksheet.write_row('A1', self.source_data['title'], cell_format_title) _, col_num = self.source_data['data'].shape for i in range(col_num): try: data_array = self.source_data['data'][:, i].astype(float) except ValueError: data_array = self.source_data['data'][:, i] worksheet.write_column(1, i, data_array.tolist(), cell_format)
def generate_chart(self, properties): """ Function: Generate and save chart to specific sheet. Input: sheet: If already exists, new chart will be added below. Otherwise, it would create a new sheet; x_axis: Specify x axis; y_axis: Specify y axis; series: Specify series; filters: dict type, use to filter useful data from original data; title: if None, the chart will create without title; x_axis_name: if None, use x_axis instead; y_axis_name: if None, use y_axis instead; """ # check input parameters if not {'x_axis', 'y_axis', 'series', 'filters'}.issubset(set(properties.keys())): raise RuntimeError("Error properties: %s" % properties.keys()) # generate chart mask = self.__filter_data(properties['filters']) chart = self.__generate_chart(mask, properties) sheet = properties['sheet'] # Add work sheet if sheet in self.sheet_dict.keys(): self.sheet_dict[sheet] += 1 worksheet = self.workbook.get_worksheet_by_name(sheet) else: self.sheet_dict[sheet] = 1 worksheet = self.workbook.add_worksheet(sheet) worksheet.insert_chart('B%d' % (5 + (self.sheet_dict[sheet] - 1) * 35), chart)
def gen_config_sheet(self, sheetname, plist):
    """ generate configuration"""
    worksheet_cfg = self.workbook.add_worksheet(sheetname)
    cell_format = self.workbook.add_format({'bold': False, 'font_name': u'等线'})
    cell_format_title = self.workbook.add_format({'border': 0, 'align': 'center', 'bg_color': '#c5d9f1', 'font_size': 12, 'font_name': u'等线', 'bold': False})
    worksheet_cfg.set_column('A:E', 40, cell_format)
    # First row is the title row; the rest are plain configuration rows
    worksheet_cfg.write_row('A1', plist[0], cell_format_title)
    for row_no, row in enumerate(plist[1:], start=2):
        worksheet_cfg.write_row('A%d' % row_no, row, cell_format)
def gen_data_sheet(self, datafile, para_meter, scale=1.75, steady_time=300):
    """
    datafile, sheetname, x_axis_name, y_axis_name, title,
    Function: Turn realtime bw data into picture, and save into specific sheet
    Input:
        sheetname: If already exists, new chart will be added continuely. Otherwise, it would create new sheet;
        x_axis_name: x_axis name;
        y_axis_name: y_axis name;
        title: picure name;
        scale; size of picture.
    """
    # Basename without extension; [:-5] strips a trailing 5-char tag from the
    # log name (presumably a fio log suffix) -- TODO confirm against callers
    filename = os.path.splitext(os.path.split(datafile)[1])[0][:-5]
    para_meter['filename'] = filename
    # Only the first three columns are used (time, value, 0/1 series id)
    source_data = np.loadtxt(datafile, dtype=int, delimiter=',')[:, :3]
    is_bw = 'bw'in para_meter['title'].lower()
    file_data = process_rt_data(source_data, is_bw)
    pic_path = generate_rt_pic(file_data, para_meter, scale)
    pic_path_steady = generate_steady_rt_pic(file_data, para_meter, scale, steady_time)
    # sheetname_dict counts image-pairs per sheet so that each new pair is
    # inserted 30 rows below the previous one
    if para_meter['sheetname'] in self.sheetname_dict.keys():
        self.sheetname_dict[para_meter['sheetname']] = \
            self.sheetname_dict[para_meter['sheetname']] + 1
        chart_sheet = self.workbook.get_worksheet_by_name(para_meter['sheetname'])
    else:
        self.sheetname_dict[para_meter['sheetname']] = 1
        chart_sheet = self.workbook.add_worksheet(para_meter['sheetname'])
    chart_sheet.insert_image('B%d' % (5 + (self.sheetname_dict[para_meter['sheetname']] - 1) * 30), pic_path)
    chart_sheet.insert_image('Q%d' % (5 + (self.sheetname_dict[para_meter['sheetname']] - 1) * 30), pic_path_steady)
    self.__insert_value(chart_sheet, file_data, 5 + (self.sheetname_dict[para_meter['sheetname']] - 1) * 30, steady_time)
    # Remember the generated files (presumably for cleanup later)
    self.pic_list.append(pic_path)
    self.pic_list.append(pic_path_steady)
def env():
    """Verify PCI variables and construct exported variables"""

    if cij.ssh.env():
        cij.err("cij.pci.env: invalid SSH environment")
        return 1

    pci = cij.env_to_dict(PREFIX, REQUIRED)

    bus_path = "/sys/bus/pci"
    pci["BUS_PATH"] = bus_path
    pci["DEV_PATH"] = os.sep.join([bus_path, "devices", pci["DEV_NAME"]])

    cij.env_export(PREFIX, EXPORTED, pci)
    return 0
def info(txt):
    """Print, emphasized 'neutral', the given 'txt' message"""

    message = "%s# %s%s%s" % (PR_EMPH_CC, get_time_stamp(), txt, PR_NC)
    print(message)
    sys.stdout.flush()
def good(txt):
    """Print, emphasized 'good', the given 'txt' message"""

    message = "%s# %s%s%s" % (PR_GOOD_CC, get_time_stamp(), txt, PR_NC)
    print(message)
    sys.stdout.flush()
def warn(txt):
    """Print, emphasized 'warning', the given 'txt' message"""

    message = "%s# %s%s%s" % (PR_WARN_CC, get_time_stamp(), txt, PR_NC)
    print(message)
    sys.stdout.flush()
def err(txt):
    """Print, emphasized 'error', the given 'txt' message"""

    message = "%s# %s%s%s" % (PR_ERR_CC, get_time_stamp(), txt, PR_NC)
    print(message)
    sys.stdout.flush()
def emph(txt, rval=None):
    """Print, emphasized based on rval"""

    if rval is None:    # rval is not specified, use 'neutral'
        info(txt)
        return
    if rval == 0:       # rval is 0, by convention, this is 'good'
        good(txt)
        return
    err(txt)            # any other value, considered 'bad'
def paths_from_env(prefix=None, names=None):
    """Construct dict of paths from environment variables'"""

    def expand_path(path):
        """Expands variables in 'path' and turns it into absolute path"""
        return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))

    if prefix is None:
        prefix = "CIJ"
    if names is None:
        names = [
            "ROOT", "ENVS", "TESTPLANS", "TESTCASES", "TESTSUITES", "MODULES",
            "HOOKS", "TEMPLATES"
        ]

    conf = {v: os.environ.get("_".join([prefix, v])) for v in names}

    # BUGFIX: the filter used to be 'e[:len(prefix)] in names', which compared
    # a prefix-length slice of the key (e.g. "ROO") against the full names and
    # never matched -- so paths were never expanded nor existence-checked
    for env in (e for e in conf.keys() if conf[e]):
        conf[env] = expand_path(conf[env])
        if not os.path.exists(conf[env]):
            err("%s_%s: %r, does not exist" % (prefix, env, conf[env]))

    return conf
def env_to_dict(prefix, names):
    """
    Construct dict from environment variables named: PREFIX_NAME

    @returns dict of names, or None when any variable is unset
    """

    env = {}
    for name in names:
        value = ENV.get("_".join([prefix, name]))
        if value is None:
            return None
        env[name] = value
    return env
def env_export(prefix, exported, env):
    """
    Define the list of 'exported' variables with 'prefix' with values from 'env'
    """

    for exp in exported:
        key = "_".join([prefix, exp])
        ENV[key] = env[exp]
def env():
    """Verify NVME variables and construct exported variables"""

    if cij.ssh.env():
        cij.err("cij.nvm.env: invalid SSH environment")
        return 1

    nvm = cij.env_to_dict(PREFIX, REQUIRED)

    dev_name = nvm["DEV_NAME"]
    if "nvme" in dev_name:
        nvm["DEV_PATH"] = "/dev/%s" % dev_name      # kernel device node
    else:
        nvm["DEV_PATH"] = "traddr:%s" % dev_name    # fabrics transport address

    cij.env_export(PREFIX, EXPORTED, nvm)
    return 0
def exists():
    """Verify that the ENV defined NVMe device exists"""

    if env():
        cij.err("cij.nvm.exists: Invalid NVMe ENV.")
        return 1

    nvm = cij.env_to_dict(PREFIX, EXPORTED + REQUIRED)

    # '[[ -b ... ]]' tests for a block device on the remote side
    cmd = ['[[ -b "%s" ]]' % nvm["DEV_PATH"]]
    rcode, _, _ = cij.ssh.command(cmd, shell=True, echo=False)
    return rcode
def dev_get_rprt(dev_name, pugrp=None, punit=None):
    """
    Get-log-page chunk information

    If the pugrp and punit is set, then provide report only for that pugrp/punit

    @returns the first chunk in the given state if one exists, None otherwise
    """

    if pugrp is None and punit is None:
        cmd = ["nvm_cmd", "rprt_all", dev_name]
    else:
        cmd = ["nvm_cmd", "rprt_lun", dev_name, str(pugrp), str(punit)]

    _, _, _, struct = cij.test.command_to_struct(cmd)
    if not struct:
        return None

    return struct["rprt_descr"]
def dev_get_chunk(dev_name, state, pugrp=None, punit=None):
    """
    Get a chunk-descriptor for the first chunk in the given state.

    If the pugrp and punit is set, then search only that pugrp/punit

    @returns the first chunk in the given state if one exists, None otherwise
    """

    rprt = dev_get_rprt(dev_name, pugrp, punit)
    if not rprt:
        return None

    for descr in rprt:
        if descr["cs"] == state:
            return descr
    return None
def pkill():
    """Kill all of FIO processes"""

    if env():
        return 1

    # Probe first: grep exits non-zero when no fio process is running
    status, _, _ = cij.ssh.command(["ps -aux | grep fio | grep -v grep"], shell=True, echo=False)
    if status:
        return 0

    status, _, _ = cij.ssh.command(["pkill -f fio"], shell=True)
    return 1 if status else 0
def __parse_parms(self):
    """Translate dict parameters to string"""

    args = list()
    for key, val in self.__parm.items():
        opt = key.replace("FIO_", "").lower()
        if opt == "runtime":
            # runtime only takes effect together with --time_based
            args.append("--time_based")
        if val is None:
            args.append("--%s" % opt)           # flag-style option
        else:
            args.append("--%s=%s" % (opt, val))
    return args
def import_parms(self, args):
    """Import external dict to internal dict"""

    for key in args:
        self.set_parm(key, args[key])
def get_parm(self, key):
    """Get parameter of FIO, or None when it is not set"""

    # dict.get avoids the double lookup of 'in' followed by indexing
    return self.__parm.get(key)
def start(self):
    """Run FIO job in thread"""

    # BUGFIX: was 'Threads(...)' -- not a name the threading module provides;
    # other thread-based jobs in this codebase use threading.Thread
    self.__thread = Thread(target=self.run, args=(True, True, False))
    # setDaemon() is deprecated (Python 3.10+); assign the attribute instead
    self.__thread.daemon = True
    self.__thread.start()
def run(self, shell=True, cmdline=False, echo=True):
    """Run FIO job"""

    if env():
        return 1

    cmd = ["fio"]
    cmd.extend(self.__parse_parms())

    if cmdline:
        cij.emph("cij.fio.run: shell: %r, cmd: %r" % (shell, cmd))

    return cij.ssh.command(cmd, shell, echo)
def extract_hook_names(ent):
    """Extract hook names from the given entity"""

    hnames = []
    for hook in ent["hooks"]["enter"] + ent["hooks"]["exit"]:
        base = os.path.splitext(os.path.basename(hook["fpath_orig"]))[0]
        base = base.strip().replace("_enter", "").replace("_exit", "")
        if base not in hnames:
            hnames.append(base)

    hnames.sort()
    return hnames
def tcase_comment(tcase):
    """
    Extract testcase comment section / testcase description

    @returns the testcase-comment from the tcase["fpath"] as a list of strings,
             None on invalid source or extension
    """

    # BUGFIX: close the file handle deterministically (was a bare open().read())
    with open(tcase["fpath"]) as src_fd:
        src = src_fd.read()

    if len(src) < 3:
        cij.err("rprtr::tcase_comment: invalid src, tcase: %r" % tcase["name"])
        return None

    ext = os.path.splitext(tcase["fpath"])[-1]
    if ext not in [".sh", ".py"]:
        cij.err("rprtr::tcase_comment: invalid ext: %r, tcase: %r" % (
            ext, tcase["name"]
        ))
        return None

    comment = []
    for line in src.splitlines()[2:]:   # skip the first two lines (shebang etc.)
        if ext == ".sh" and not line.startswith("#"):
            break
        elif ext == ".py" and '"""' not in line:
            break
        comment.append(line)

    return comment
def tcase_parse_descr(tcase):
    """Parse descriptions from the the given tcase"""

    descr_short = "SHORT"
    descr_long = "LONG"

    try:
        comment = tcase_comment(tcase)
    except (IOError, OSError, ValueError) as exc:
        comment = []
        cij.err("tcase_parse_descr: failed: %r, tcase: %r" % (exc, tcase))

    comment = [line for line in comment if line.strip()]    # Remove empty lines

    # Drop the leading '#' of shell-style comment lines
    for idx, line in enumerate(comment):
        if line.startswith("#"):
            comment[idx] = line[1:]

    if comment:
        descr_short = comment[0]
        if len(comment) > 1:
            descr_long = "\n".join(comment[1:])

    return descr_short, descr_long
def runlogs_to_html(run_root):
    """
    Returns content of the given 'fpath' with HTML annotations, currently simply
    a conversion of ANSI color codes to HTML elements
    """

    if not os.path.isdir(run_root):
        return "CANNOT_LOCATE_LOGFILES"

    # Order the logs: enter-hooks first, then testcases, then exit-hooks
    hook_enter = []
    hook_exit = []
    tcase = []
    for fpath in glob.glob(os.sep.join([run_root, "*.log"])):
        if "exit" in fpath:
            hook_exit.append(fpath)
            continue
        if "hook" in fpath:
            hook_enter.append(fpath)
            continue
        tcase.append(fpath)

    content = ""
    for fpath in hook_enter + tcase + hook_exit:
        content += "# BEGIN: run-log from log_fpath: %s\n" % fpath
        # BUGFIX: close each log file deterministically (was a bare open().read())
        with open(fpath, "r") as log_fd:
            content += log_fd.read()
        content += "# END: run-log from log_fpath: %s\n\n" % fpath

    return content
def src_to_html(fpath):
    """
    Returns content of the given 'fpath' with HTML annotations for syntax
    highlighting
    """

    if not os.path.exists(fpath):
        return "COULD-NOT-FIND-TESTCASE-SRC-AT-FPATH:%r" % fpath

    # NOTE: Do SYNTAX highlight?

    # BUGFIX: close the file handle deterministically (was a bare open().read())
    with open(fpath, "r") as src_fd:
        return src_fd.read()
def aux_listing(aux_root):
    """Listing"""

    listing = []
    depth = len(aux_root.split(os.sep))     # components of the root itself
    for root, _, fnames in os.walk(aux_root):
        rel_parts = root.split(os.sep)[depth:]  # path relative to aux_root
        for fname in fnames:
            listing.append(os.sep.join(rel_parts + [fname]))
    return listing
def process_tsuite(tsuite):
    """Goes through the tsuite and processes "*.log" """

    # scoop of output from all run-logs, aux-file listing, and hook names
    tsuite.update({
        "log_content": runlogs_to_html(tsuite["res_root"]),
        "aux_list": aux_listing(tsuite["aux_root"]),
        "hnames": extract_hook_names(tsuite),
    })
    return True
def process_tcase(tcase):
    """Goes through the trun and processes "run.log" """

    tcase["src_content"] = src_to_html(tcase["fpath"])
    tcase["log_content"] = runlogs_to_html(tcase["res_root"])
    tcase["aux_list"] = aux_listing(tcase["aux_root"])
    # Short/long descriptions parsed from the testcase source comment
    short_descr, long_descr = tcase_parse_descr(tcase)
    tcase["descr_short"] = short_descr
    tcase["descr_long"] = long_descr
    tcase["hnames"] = extract_hook_names(tcase)
    return True
def process_trun(trun):
    """Goes through the trun and processes "run.log" """

    trun.update({
        "log_content": runlogs_to_html(trun["res_root"]),
        "aux_list": aux_listing(trun["aux_root"]),
        "hnames": extract_hook_names(trun),
    })
    return True
def postprocess(trun):
    """Perform postprocessing of the given test run"""

    plog = [("trun", process_trun(trun))]
    for tsuite in trun["testsuites"]:
        plog.append(("tsuite", process_tsuite(tsuite)))
        for tcase in tsuite["testcases"]:
            plog.append(("tcase", process_tcase(tcase)))

    for task, success in plog:
        if not success:
            cij.err("rprtr::postprocess: FAILED for %r" % task)

    # Count of successfully processed entities
    return sum(success for _, success in plog)
def rehome(old, new, struct):
    """
    Replace all absolute paths to "re-home" it

    Recursively walks lists/dicts, rewriting values whose key contains
    'root' or 'path', except keys containing 'conf' or 'orig'.
    """

    if old == new:
        return

    if isinstance(struct, list):
        for item in struct:
            rehome(old, new, item)
    elif isinstance(struct, dict):
        # BUGFIX: dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3 -- use items()
        for key, val in struct.items():
            if isinstance(val, (dict, list)):
                rehome(old, new, val)
            elif "conf" in key:
                continue
            elif "orig" in key:
                continue
            elif "root" in key or "path" in key:
                struct[key] = struct[key].replace(old, new)
def main(args):
    """Main entry point"""
    trun = cij.runner.trun_from_file(args.trun_fpath)
    # Rewrite the absolute paths recorded at run-time so the report can be
    # rendered from a different output directory
    rehome(trun["conf"]["OUTPUT"], args.output, trun)
    postprocess(trun)
    cij.emph("main: reports are uses tmpl_fpath: %r" % args.tmpl_fpath)
    cij.emph("main: reports are here args.output: %r" % args.output)
    html_fpath = os.sep.join([args.output, "%s.html" % args.tmpl_name])
    cij.emph("html_fpath: %r" % html_fpath)
    try:    # Create and store HTML report
        with open(html_fpath, 'w') as html_file:
            html_file.write(dset_to_html(trun, args.tmpl_fpath))
    except (IOError, OSError, ValueError) as exc:
        import traceback
        traceback.print_exc()
        cij.err("rprtr:main: exc: %s" % exc)
        return 1
    return 0
def env():
    """Verify SSH variables and construct exported variables"""

    ssh = cij.env_to_dict(PREFIX, REQUIRED)
    if ssh is None:
        # BUGFIX: env_to_dict returns None when a required variable is
        # missing; the '"KEY" in ssh' test below would raise TypeError
        cij.err("cij.ssh.env: invalid SSH environment")
        return 1

    if "KEY" in ssh:
        ssh["KEY"] = cij.util.expand_path(ssh["KEY"])

    if cij.ENV.get("SSH_PORT") is None:
        cij.ENV["SSH_PORT"] = "22"
        cij.warn("cij.ssh.env: SSH_PORT was not set, assigned: %r" % (
            cij.ENV.get("SSH_PORT")
        ))

    if cij.ENV.get("SSH_CMD_TIME") is None:
        cij.ENV["SSH_CMD_TIME"] = "1"
        cij.warn("cij.ssh.env: SSH_CMD_TIME was not set, assigned: %r" % (
            cij.ENV.get("SSH_CMD_TIME")
        ))

    return 0
def command(cmd, shell=True, echo=True, suffix=None):
    """SSH: Run the given command over SSH as defined in environment"""
    if env():
        cij.err("cij.ssh.command: Invalid SSH environment")
        return 1

    prefix = []
    if cij.ENV.get("SSH_CMD_TIME") == "1":
        prefix.append("/usr/bin/time")  # measure the remote command's time
    if cij.ENV.get("SSH_CMD_TIMEOUT"):
        prefix.append("timeout")
        prefix.append(cij.ENV.get("SSH_CMD_TIMEOUT"))
    prefix.append("ssh")

    args = []
    if cij.ENV.get("SSH_KEY"):
        args.append("-i")
        args.append(cij.ENV.get("SSH_KEY"))
    if cij.ENV.get("SSH_PORT"):
        args.append("-p")
        args.append(cij.ENV.get("SSH_PORT"))
    args.append("@".join([cij.ENV.get("SSH_USER"), cij.ENV.get("SSH_HOST")]))

    # Single-quote the joined remote command so the local shell does not
    # expand it before it reaches the remote side
    wrapped = prefix + args + ["'%s'" % " ".join(cmd)]
    if suffix:
        wrapped += suffix

    return cij.util.execute(wrapped, shell, echo)
def pull(src, dst, folder=False):
    """SSH: pull data from remote linux"""

    if env():
        cij.err("cij.ssh.pull: Invalid SSH environment")
        return 1

    args = []
    if cij.ENV.get("SSH_KEY"):
        args.extend(["-i", cij.ENV.get("SSH_KEY")])
    if cij.ENV.get("SSH_PORT"):
        args.extend(["-P", cij.ENV.get("SSH_PORT")])
    if folder:
        args.append("-r")

    remote = "@".join([cij.ENV.get("SSH_USER"), cij.ENV.get("SSH_HOST")])
    target = "%s:%s" % (remote, src)

    wrapped = ["scp", " ".join(args), target, dst]
    return cij.util.execute(wrapped, shell=True, echo=True)
def wait(timeout=300):
    """Wait util target connected"""
    if env():
        cij.err("cij.ssh.wait: Invalid SSH environment")
        return 1

    # Temporarily shorten the per-command timeout so each probe fails fast;
    # the original value is restored in the 'finally' clause
    timeout_backup = cij.ENV.get("SSH_CMD_TIMEOUT")
    try:
        time_start = time.time()
        cij.ENV["SSH_CMD_TIMEOUT"] = "3"
        while True:
            time_current = time.time()
            if (time_current - time_start) > timeout:
                cij.err("cij.ssh.wait: Timeout")
                return 1
            # 'exit' succeeds as soon as an SSH session can be established
            status, _, _ = command(["exit"], shell=True, echo=False)
            if not status:
                break
            cij.info("cij.ssh.wait: Time elapsed: %d seconds" % (time_current - time_start))
    finally:
        if timeout_backup is None:
            del cij.ENV["SSH_CMD_TIMEOUT"]
        else:
            cij.ENV["SSH_CMD_TIMEOUT"] = timeout_backup
    return 0
def reboot(timeout=300, extra=""): """Reboot target""" if env(): cij.err("cij.ssh.reboot: Invalid SSH environment") return 1 timeout_backup = cij.ENV.get("SSH_CMD_TIMEOUT") try: time_start = time.time() status, last_uptime, _ = command(["/usr/bin/uptime -s"], shell=True, echo=False) if status: return 1 cij.ENV["SSH_CMD_TIMEOUT"] = "3" cij.info("cij.ssh.reboot: Target: %s" % cij.ENV.get("SSH_HOST")) command(["reboot %s" % extra], shell=True, echo=False) while True: time_current = time.time() if (time_current - time_start) > timeout: cij.err("cij.ssh.reboot: Timeout") return 1 status, current_uptime, _ = command(["/usr/bin/uptime -s"], shell=True, echo=False) if not status and current_uptime != last_uptime: break cij.info("cij.ssh.reboot: Time elapsed: %d seconds" % (time_current - time_start)) finally: if timeout_backup is None: del cij.ENV["SSH_CMD_TIMEOUT"] else: cij.ENV["SSH_CMD_TIMEOUT"] = timeout_backup return 0
def assert_that(val, description=''):
    """Factory method for the assertion builder with value to be tested and optional description."""
    global _soft_ctx
    if not _soft_ctx:
        return AssertionBuilder(val, description)
    # within a soft-assertion context, failures are collected instead of raised
    return AssertionBuilder(val, description, 'soft')
def contents_of(f, encoding='utf-8'):
    """Helper to read the contents of the given file or path into a string with the given encoding.
    Encoding defaults to 'utf-8', other useful encodings are 'ascii' and 'latin-1'."""
    try:
        contents = f.read()     # assume a file-like object first (EAFP)
    except AttributeError:
        # not file-like: treat 'f' as a path
        try:
            with open(f, 'r') as fp:
                contents = fp.read()
        except TypeError:
            raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
        except OSError:
            if not isinstance(f, str_types):
                raise ValueError('val must be file or path, but was type <%s>' % type(f).__name__)
            raise
    if sys.version_info[0] == 3 and type(contents) is bytes:
        # in PY3 force decoding of bytes to target encoding
        return contents.decode(encoding, 'replace')
    elif sys.version_info[0] == 2 and encoding == 'ascii':
        # in PY2 force encoding back to ascii
        return contents.encode('ascii', 'replace')
    else:
        # in all other cases, try to decode to target encoding
        try:
            return contents.decode(encoding, 'replace')
        except AttributeError:
            pass
    # if all else fails, just return the contents "as is"
    return contents
def soft_fail(msg=''):
    """Adds error message to soft errors list if within soft assertions context.
    Either just force test failure with the given message."""
    global _soft_ctx
    if not _soft_ctx:
        # outside a soft context, fail immediately
        return fail(msg)
    global _soft_err
    if msg:
        _soft_err.append('Fail: %s!' % msg)
    else:
        _soft_err.append('Fail!')
    return
def is_equal_to(self, other, **kwargs):
    """Asserts that val is equal to other."""
    both_dict_like = self._check_dict_like(self.val, check_values=False, return_as_bool=True) and \
        self._check_dict_like(other, check_values=False, return_as_bool=True)
    if both_dict_like:
        # dict-like values get key-aware comparison with ignore/include support
        ignore = kwargs.get('ignore')
        include = kwargs.get('include')
        if self._dict_not_equal(self.val, other, ignore=ignore, include=include):
            self._dict_err(self.val, other, ignore=ignore, include=include)
    elif self.val != other:
        self._err('Expected <%s> to be equal to <%s>, but was not.' % (self.val, other))
    return self
def is_not_equal_to(self, other):
    """Asserts that val is not equal to other."""
    values_equal = self.val == other
    if values_equal:
        self._err('Expected <%s> to be not equal to <%s>, but was.' % (self.val, other))
    return self
def is_same_as(self, other):
    """Asserts that the val is identical to other, via 'is' compare."""
    same_object = self.val is other
    if not same_object:
        self._err('Expected <%s> to be identical to <%s>, but was not.' % (self.val, other))
    return self
def is_not_same_as(self, other):
    """Asserts that the val is not identical to other, via 'is' compare."""
    same_object = self.val is other
    if same_object:
        self._err('Expected <%s> to be not identical to <%s>, but was.' % (self.val, other))
    return self
def is_type_of(self, some_type):
    """Asserts that val is of the given type (exact match, no subclasses)."""
    if type(some_type) is not type and not issubclass(type(some_type), type):
        raise TypeError('given arg must be a type')
    if type(self.val) is some_type:
        return self
    # best-effort name of the actual type for the error message
    if hasattr(self.val, '__name__'):
        type_name = self.val.__name__
    elif hasattr(self.val, '__class__'):
        type_name = self.val.__class__.__name__
    else:
        type_name = 'unknown'
    self._err('Expected <%s:%s> to be of type <%s>, but was not.' % (self.val, type_name, some_type.__name__))
    return self
def is_instance_of(self, some_class):
    """Asserts that val is an instance of the given class."""
    try:
        if not isinstance(self.val, some_class):
            # best-effort name of the actual type for the error message
            if hasattr(self.val, '__name__'):
                type_name = self.val.__name__
            elif hasattr(self.val, '__class__'):
                type_name = self.val.__class__.__name__
            else:
                type_name = 'unknown'
            self._err('Expected <%s:%s> to be instance of class <%s>, but was not.' % (self.val, type_name, some_class.__name__))
    except TypeError:
        # isinstance raises TypeError when 'some_class' is not a class
        raise TypeError('given arg must be a class')
    return self
def is_length(self, length):
    """Asserts that val is the given length."""
    if type(length) is not int:     # exact int, bools deliberately rejected
        raise TypeError('given arg must be an int')
    if length < 0:
        raise ValueError('given arg must be a positive int')
    actual = len(self.val)
    if actual != length:
        self._err('Expected <%s> to be of length <%d>, but was <%d>.' % (self.val, length, actual))
    return self
def contains(self, *items):
    """Asserts that val contains the given item or items.

    Membership is tested with the 'in' operator, so for dict-like vals
    this checks keys (and the error message says so).

    Raises:
        ValueError: if no items are given.
    """
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    elif len(items) == 1:
        if items[0] not in self.val:
            if self._check_dict_like(self.val, return_as_bool=True):
                self._err('Expected <%s> to contain key <%s>, but did not.' % (self.val, items[0]))
            else:
                self._err('Expected <%s> to contain item <%s>, but did not.' % (self.val, items[0]))
    else:
        missing = [i for i in items if i not in self.val]
        if missing:
            if self._check_dict_like(self.val, return_as_bool=True):
                # BUG FIX: pluralize 'key' on the number of missing keys. The
                # old test was len(missing) == 0, which can never be true
                # inside this branch, so the message always read 'keys' even
                # when exactly one key was missing.
                self._err('Expected <%s> to contain keys %s, but did not contain key%s %s.' % (
                    self.val, self._fmt_items(items),
                    '' if len(missing) == 1 else 's', self._fmt_items(missing)))
            else:
                self._err('Expected <%s> to contain items %s, but did not contain %s.' % (
                    self.val, self._fmt_items(items), self._fmt_items(missing)))
    return self
def does_not_contain(self, *items):
    """Assert that val contains none of the given items.

    Raises:
        ValueError: if no items are given.
    """
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    if len(items) == 1:
        if items[0] in self.val:
            self._err('Expected <%s> to not contain item <%s>, but did.' % (self.val, items[0]))
    else:
        # collect every offending item so the error can list them all
        found = [candidate for candidate in items if candidate in self.val]
        if found:
            self._err('Expected <%s> to not contain items %s, but did contain %s.' % (
                self.val, self._fmt_items(items), self._fmt_items(found)))
    return self
def contains_only(self, *items):
    """Assert that val contains all of, and nothing but, the given items.

    Raises:
        ValueError: if no items are given.
    """
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    # anything in val that was not listed is 'extra'
    extra = [member for member in self.val if member not in items]
    if extra:
        self._err('Expected <%s> to contain only %s, but did contain %s.' % (
            self.val, self._fmt_items(items), self._fmt_items(extra)))
    # anything listed that val lacks is 'missing'
    missing = [wanted for wanted in items if wanted not in self.val]
    if missing:
        self._err('Expected <%s> to contain only %s, but did not contain %s.' % (
            self.val, self._fmt_items(items), self._fmt_items(missing)))
    return self
def contains_sequence(self, *items):
    """Asserts that val contains the given sequence of items, contiguously and in order.

    Raises:
        ValueError: if no items are given.
        TypeError: if val does not support len()/indexing.
    """
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    try:
        # Slide a window of len(items) across val. Uses 'range' instead of
        # the Python-2-only 'xrange', which does not exist on Python 3.
        for start in range(len(self.val) - len(items) + 1):
            for offset in range(len(items)):
                if self.val[start + offset] != items[offset]:
                    break
            else:
                # inner loop completed without a mismatch: sequence found
                return self
    except TypeError:
        raise TypeError('val is not iterable')
    self._err('Expected <%s> to contain sequence %s, but did not.' % (self.val, self._fmt_items(items)))
def contains_duplicates(self):
    """Assert that the iterable val holds at least one repeated item.

    Raises:
        TypeError: if val is not iterable/hashable.
    """
    try:
        # a set collapses repeats, so shrinking proves duplicates exist
        distinct = set(self.val)
        if len(distinct) < len(self.val):
            return self
    except TypeError:
        raise TypeError('val is not iterable')
    self._err('Expected <%s> to contain duplicates, but did not.' % self.val)
def does_not_contain_duplicates(self):
    """Assert that the iterable val holds no repeated items.

    Raises:
        TypeError: if val is not iterable/hashable.
    """
    try:
        # a set collapses repeats; equal sizes prove all items are unique
        distinct = set(self.val)
        if len(distinct) == len(self.val):
            return self
    except TypeError:
        raise TypeError('val is not iterable')
    self._err('Expected <%s> to not contain duplicates, but did.' % self.val)
def is_empty(self):
    """Assert that val has zero length."""
    if len(self.val) > 0:
        # tailor the message for strings vs. other sized containers
        kind = 'empty string' if isinstance(self.val, str_types) else 'empty'
        self._err('Expected <%s> to be %s, but was not.' % (self.val, kind))
    return self
def is_not_empty(self):
    """Assert that val has non-zero length."""
    if len(self.val) == 0:
        # tailor the message for strings vs. other sized containers
        kind = 'not empty string' if isinstance(self.val, str_types) else 'not empty'
        self._err('Expected %s, but was empty.' % kind)
    return self
def is_in(self, *items):
    """Assert that val compares equal (==) to at least one of the given items.

    Raises:
        ValueError: if no items are given.
    """
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    if any(self.val == candidate for candidate in items):
        return self
    self._err('Expected <%s> to be in %s, but was not.' % (self.val, self._fmt_items(items)))
def is_nan(self):
    """Assert that val is a real number whose value is NaN."""
    self._validate_number()
    self._validate_real()
    if math.isnan(self.val):
        return self
    self._err('Expected <%s> to be <NaN>, but was not.' % self.val)
    return self
def is_not_nan(self):
    """Assert that val is a real number and is not NaN."""
    self._validate_number()
    self._validate_real()
    if not math.isnan(self.val):
        return self
    self._err('Expected not <NaN>, but was.')
    return self
def is_inf(self):
    """Assert that val is a real number whose value is infinite."""
    self._validate_number()
    self._validate_real()
    if math.isinf(self.val):
        return self
    self._err('Expected <%s> to be <Inf>, but was not.' % self.val)
    return self
def is_not_inf(self):
    """Assert that val is a real number and is finite (not +/-Inf)."""
    self._validate_number()
    self._validate_real()
    if not math.isinf(self.val):
        return self
    self._err('Expected not <Inf>, but was.')
    return self
def is_less_than(self, other):
    """Assert that val < other (numbers, datetimes, or other comparables)."""
    self._validate_compareable(other)
    if self.val >= other:
        if type(self.val) is datetime.datetime:
            # render datetimes in a readable fixed format
            stamp = '%Y-%m-%d %H:%M:%S'
            self._err('Expected <%s> to be less than <%s>, but was not.' % (
                self.val.strftime(stamp), other.strftime(stamp)))
        else:
            self._err('Expected <%s> to be less than <%s>, but was not.' % (self.val, other))
    return self
def is_between(self, low, high):
    """Assert that low <= val <= high (inclusive on both ends)."""
    val_type = type(self.val)
    self._validate_between_args(val_type, low, high)
    if low <= self.val <= high:
        return self
    if val_type is datetime.datetime:
        # render datetimes in a readable fixed format
        stamp = '%Y-%m-%d %H:%M:%S'
        self._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (
            self.val.strftime(stamp), low.strftime(stamp), high.strftime(stamp)))
    else:
        self._err('Expected <%s> to be between <%s> and <%s>, but was not.' % (self.val, low, high))
    return self
def is_close_to(self, other, tolerance):
    """Assert that val lies within +/- tolerance of other.

    Works for numbers, and for datetimes with a timedelta tolerance.
    """
    self._validate_close_to_args(self.val, other, tolerance)
    if (other - tolerance) <= self.val <= (other + tolerance):
        return self
    if type(self.val) is datetime.datetime:
        # express the timedelta tolerance as h:mm:ss for the message
        total_seconds = tolerance.days * 86400 + tolerance.seconds + tolerance.microseconds / 1000000
        hours, remainder = divmod(total_seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        stamp = '%Y-%m-%d %H:%M:%S'
        self._err('Expected <%s> to be close to <%s> within tolerance <%d:%02d:%02d>, but was not.' % (
            self.val.strftime(stamp), other.strftime(stamp), hours, minutes, seconds))
    else:
        self._err('Expected <%s> to be close to <%s> within tolerance <%s>, but was not.' % (
            self.val, other, tolerance))
    return self
def is_equal_to_ignoring_case(self, other):
    """Assert case-insensitive equality of the string val and other.

    Raises:
        TypeError: if val or other is not a string.
    """
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if not isinstance(other, str_types):
        raise TypeError('given arg must be a string')
    if self.val.lower() == other.lower():
        return self
    self._err('Expected <%s> to be case-insensitive equal to <%s>, but was not.' % (self.val, other))
    return self
def contains_ignoring_case(self, *items):
    """Asserts that val is string and contains the given item or items.

    For a string val, each item must be a string and is matched as a
    case-insensitive substring. For a non-string iterable val, each item
    must case-insensitively equal one of val's (string) elements.

    Raises:
        ValueError: if no items are given.
        TypeError: if val is neither a string nor an iterable, or if any
            compared value is not a string.
    """
    if len(items) == 0:
        raise ValueError('one or more args must be given')
    if isinstance(self.val, str_types):
        if len(items) == 1:
            # single item: case-insensitive substring test
            if not isinstance(items[0], str_types):
                raise TypeError('given arg must be a string')
            if items[0].lower() not in self.val.lower():
                self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0]))
        else:
            # multiple items: collect every item not found as a substring
            missing = []
            for i in items:
                if not isinstance(i, str_types):
                    raise TypeError('given args must all be strings')
                if i.lower() not in self.val.lower():
                    missing.append(i)
            if missing:
                self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
    elif isinstance(self.val, Iterable):
        missing = []
        for i in items:
            if not isinstance(i, str_types):
                raise TypeError('given args must all be strings')
            found = False
            for v in self.val:
                if not isinstance(v, str_types):
                    raise TypeError('val items must all be strings')
                # whole-element equality (not substring) for iterables
                if i.lower() == v.lower():
                    found = True
                    break
            if not found:
                missing.append(i)
        if missing:
            self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
    else:
        raise TypeError('val is not a string or iterable')
    return self
def starts_with(self, prefix):
    """Assert that a string val starts with prefix, or that an iterable
    val's first element equals prefix.

    Raises:
        TypeError: if prefix is None, if a string val gets a non-string
            prefix, or if val is neither string nor iterable.
        ValueError: if a string prefix is empty, or an iterable val is empty.
    """
    if prefix is None:
        raise TypeError('given prefix arg must not be none')
    if isinstance(self.val, str_types):
        if not isinstance(prefix, str_types):
            raise TypeError('given prefix arg must be a string')
        if len(prefix) == 0:
            raise ValueError('given prefix arg must not be empty')
        if not self.val.startswith(prefix):
            self._err('Expected <%s> to start with <%s>, but did not.' % (self.val, prefix))
    elif isinstance(self.val, Iterable):
        if len(self.val) == 0:
            raise ValueError('val must not be empty')
        # only the first element is compared
        head = next(iter(self.val))
        if head != prefix:
            self._err('Expected %s to start with <%s>, but did not.' % (self.val, prefix))
    else:
        raise TypeError('val is not a string or iterable')
    return self
def ends_with(self, suffix):
    """Assert that a string val ends with suffix, or that an iterable
    val's last element equals suffix.

    Raises:
        TypeError: if suffix is None, if a string val gets a non-string
            suffix, or if val is neither string nor iterable.
        ValueError: if a string suffix is empty, or an iterable val is empty.
    """
    if suffix is None:
        raise TypeError('given suffix arg must not be none')
    if isinstance(self.val, str_types):
        if not isinstance(suffix, str_types):
            raise TypeError('given suffix arg must be a string')
        if len(suffix) == 0:
            raise ValueError('given suffix arg must not be empty')
        if not self.val.endswith(suffix):
            self._err('Expected <%s> to end with <%s>, but did not.' % (self.val, suffix))
    elif isinstance(self.val, Iterable):
        if len(self.val) == 0:
            raise ValueError('val must not be empty')
        # exhaust the iterable to reach its final element
        tail = None
        for tail in self.val:
            pass
        if tail != suffix:
            self._err('Expected %s to end with <%s>, but did not.' % (self.val, suffix))
    else:
        raise TypeError('val is not a string or iterable')
    return self
def matches(self, pattern):
    """Assert that the regex pattern is found in the string val
    (re.search semantics, i.e. match anywhere).

    Raises:
        TypeError: if val or pattern is not a string.
        ValueError: if pattern is empty.
    """
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if not isinstance(pattern, str_types):
        raise TypeError('given pattern arg must be a string')
    if len(pattern) == 0:
        raise ValueError('given pattern arg must not be empty')
    # Match objects are always truthy, so truthiness equals 'is not None'
    if not re.search(pattern, self.val):
        self._err('Expected <%s> to match pattern <%s>, but did not.' % (self.val, pattern))
    return self
def is_alpha(self):
    """Assert that val is a non-empty string of alphabetic characters only.

    Raises:
        TypeError: if val is not a string.
        ValueError: if val is empty.
    """
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if len(self.val) == 0:
        raise ValueError('val is empty')
    if self.val.isalpha():
        return self
    self._err('Expected <%s> to contain only alphabetic chars, but did not.' % self.val)
    return self
def is_digit(self):
    """Assert that val is a non-empty string of digit characters only.

    Raises:
        TypeError: if val is not a string.
        ValueError: if val is empty.
    """
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if len(self.val) == 0:
        raise ValueError('val is empty')
    if self.val.isdigit():
        return self
    self._err('Expected <%s> to contain only digits, but did not.' % self.val)
    return self
def is_lower(self):
    """Assert that the non-empty string val equals its lowercased form.

    Raises:
        TypeError: if val is not a string.
        ValueError: if val is empty.
    """
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if len(self.val) == 0:
        raise ValueError('val is empty')
    if self.val == self.val.lower():
        return self
    self._err('Expected <%s> to contain only lowercase chars, but did not.' % self.val)
    return self
def is_upper(self):
    """Assert that the non-empty string val equals its uppercased form.

    Raises:
        TypeError: if val is not a string.
        ValueError: if val is empty.
    """
    if not isinstance(self.val, str_types):
        raise TypeError('val is not a string')
    if len(self.val) == 0:
        raise ValueError('val is empty')
    if self.val == self.val.upper():
        return self
    self._err('Expected <%s> to contain only uppercase chars, but did not.' % self.val)
    return self
def is_unicode(self):
    """Asserts that val is a unicode string.

    NOTE(review): relies on the name 'unicode' being in scope -- a builtin
    on Python 2, presumably aliased (e.g. to str) elsewhere in this module
    for Python 3; confirm the module-level compat shim.
    """
    # exact type check: subclasses of unicode do not pass
    if type(self.val) is not unicode:
        self._err('Expected <%s> to be unicode, but was <%s>.' % (self.val, type(self.val).__name__))
    return self
def is_subset_of(self, *supersets):
    """Asserts that val is iterable and a subset of the given superset or flattened superset if multiple supersets are given.

    For a dict-like val (has callable keys() and __getitem__), every
    key/value pair must appear, with an equal value, in the union of the
    given dict-like supersets. For any other iterable val, every element
    must appear in the flattened union of the given supersets.

    Raises:
        TypeError: if val is not iterable.
        ValueError: if no superset args are given.
    """
    if not isinstance(self.val, Iterable):
        raise TypeError('val is not iterable')
    if len(supersets) == 0:
        raise ValueError('one or more superset args must be given')
    missing = []
    if hasattr(self.val, 'keys') and callable(getattr(self.val, 'keys')) and hasattr(self.val, '__getitem__'):
        # flatten superset dicts; later supersets win on duplicate keys
        superdict = {}
        for l,j in enumerate(supersets):
            self._check_dict_like(j, check_values=False, name='arg #%d' % (l+1))
            for k in j.keys():
                superdict.update({k: j[k]})
        for i in self.val.keys():
            if i not in superdict:
                missing.append({i: self.val[i]})  # bad key
            elif self.val[i] != superdict[i]:
                missing.append({i: self.val[i]})  # bad val
        if missing:
            self._err('Expected <%s> to be subset of %s, but %s %s missing.' % (self.val, self._fmt_items(superdict), self._fmt_items(missing), 'was' if len(missing) == 1 else 'were'))
    else:
        # flatten supersets into one set of elements
        superset = set()
        for j in supersets:
            try:
                for k in j:
                    superset.add(k)
            except Exception:
                # non-iterable superset arg: treat it as a single element
                superset.add(j)
        for i in self.val:
            if i not in superset:
                missing.append(i)
        if missing:
            self._err('Expected <%s> to be subset of %s, but %s %s missing.' % (self.val, self._fmt_items(superset), self._fmt_items(missing), 'was' if len(missing) == 1 else 'were'))
    return self
def contains_key(self, *keys):
    """Asserts the val is a dict and contains the given key or keys. Alias for contains()."""
    # validate the dict contract first; value/getitem support is not
    # required for a key-only check
    self._check_dict_like(self.val, check_values=False, check_getitem=False)
    return self.contains(*keys)
def does_not_contain_key(self, *keys):
    """Asserts the val is a dict and does not contain the given key or keys. Alias for does_not_contain()."""
    # validate the dict contract first; value/getitem support is not
    # required for a key-only check
    self._check_dict_like(self.val, check_values=False, check_getitem=False)
    return self.does_not_contain(*keys)
def contains_value(self, *values):
    """Assert that the dict-like val contains every one of the given values.

    Raises:
        ValueError: if no values are given.
    """
    self._check_dict_like(self.val, check_getitem=False)
    if len(values) == 0:
        raise ValueError('one or more value args must be given')
    # collect every value not present so the error can list them all
    missing = [wanted for wanted in values if wanted not in self.val.values()]
    if missing:
        self._err('Expected <%s> to contain values %s, but did not contain %s.' % (
            self.val, self._fmt_items(values), self._fmt_items(missing)))
    return self