add fs_mark harness #233
base: master
@@ -0,0 +1 @@
$(eval $(call add_test, fs-mark-clean))
@@ -0,0 +1,98 @@
/*
 * Phoenix-RTOS
 *
 * Cleanup after fs_mark (executed with files kept).
 *
 * Copyright 2023 Phoenix Systems
 * Author: Adam Debek
 *
 * This file is part of Phoenix-RTOS.
 *
 * %LICENSE%
 */

#include <stdint.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <dirent.h>
#include <string.h>
#include <limits.h>


static int remove_dir_recursive(const char *dirPath)
{
    DIR *dir;
    char entryPath[PATH_MAX];
    struct dirent *entry;
    int (*remove_func)(const char *);
    int ret, try = 0;

    dir = opendir(dirPath);
    if (dir == NULL) {
        fprintf(stderr, "Opendir() failed errno: %s\n", strerror(errno));
        return -1;
    }

    if (dir) {
        while ((entry = readdir(dir)) != NULL) {
            if (!strcmp(".", entry->d_name) || !strcmp("..", entry->d_name)) {
                continue;
            }
            sprintf(entryPath, "%s/%s", dirPath, entry->d_name);
            remove_func = entry->d_type == DT_DIR ? remove_dir_recursive : remove;
            if (remove_func(entryPath) != 0) {
                closedir(dir);
                return -1;
            }
        }

        if (closedir(dir)) {
            return -1;
        }
    }

    errno = 0;
    while ((ret = rmdir(dirPath)) < 0) {
        if (errno == ENOTEMPTY && try < 5) {
            remove_dir_recursive(dirPath);
            try++;
        }
        else if (errno == ENOENT) {
            errno = 0;
            return 0;
        }
        else {
            return -1;
        }
    }

    return ret;
}


int main(int argc, char **argv)
{
    int i;
    DIR *dir;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s [dir1] ... [dirN]\n", argv[0]);
        return 1;
    }

    for (i = 1; i < argc; i++) {
        if ((dir = opendir(argv[i])) == NULL && errno == ENOENT) {
            fprintf(stderr, "Nonexistent directory name\n");
            return 1;
        }
        /* Clean test directory */
        errno = 0;
Review comment: Why is there errno?
        if (remove_dir_recursive(argv[i]) < 0) {
            fprintf(stderr, "Error in remove_dir_recursive() errno: %s\n", strerror(errno));
            return 1;
        }
    }

    fprintf(stderr, "Clean successful\n");
    return 0;
}
@@ -0,0 +1,159 @@
import trunner

Review comment: Group imports (standard library, third-party, and local imports).

import time
import re
import pexpect
import timing_data as t_data
import psh.tools.psh as psh

from trunner.ctx import TestContext
from trunner.dut import Dut
from trunner.types import TestResult, Status

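A sketch of how the import block could be grouped per the review comment above (treating `trunner`, `psh.tools.psh`, and `timing_data` as project-local modules is my assumption, not something stated in the PR):

```python
# Sketch only: the same imports regrouped as standard library / third-party / local.
import re
import time

import pexpect

import psh.tools.psh as psh
import timing_data as t_data
import trunner
from trunner.ctx import TestContext
from trunner.dut import Dut
from trunner.types import Status, TestResult
```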
def clean(dut, test_name, ctx: TestContext) -> int:
    # Dummyfs doesn't need clean since target is rebooted after every test
    if not ctx.target.rootfs:
        return 0

    if test_name and ('-k' in test_name or '-L' in test_name or '-F' in test_name):
Review comment: Why are we cleaning only when specifying …
Reply: Directory has to be empty in order to remove it.
Reply: These three options are -k = keep files, -L = loop, -F = fill fs. All of them cause …
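As an illustration of the `-d` directory extraction implemented just below, here is a standalone sketch run on one of the command lines from the YAML file in this PR (the simplified list comprehension is mine, not the PR's code):

```python
# Standalone sketch: pull out every argument that directly follows a '-d' flag.
test_name = "fs_mark -d /fs_mark_test -s 0 -n 100 -v -S 0 -L 50"
args = test_name.split()[1:]

dirs = [arg for i, arg in enumerate(args) if i > 0 and args[i - 1] == '-d']
print(dirs)  # ['/fs_mark_test']
```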
        args = test_name.split()
        args = args[1:]

        def get_dir(x):
            if args[x[0] - 1] == '-d':
                return x[1]

        dirs = list(map(lambda x: get_dir(x), enumerate(args)))
        dirs = [x for x in dirs if x is not None]

        dut.expect(re.escape(ctx.target.shell_prompt), timeout=120)
        psh._send(dut, f'/bin/fs_mark_clean {" ".join(dirs)}')
Review comment: So, if we're using dut everywhere, let's use it here too instead of importing psh.
        idx = dut.expect([re.escape(ctx.target.shell_prompt),
                          r".+?Error in remove_dir_recursive.+?",
                          pexpect.TIMEOUT], timeout=1800)

        if idx == 0:
            return 0
        elif idx == 1:
            return -1
        elif idx == 2:
            return -2
Review comment on lines +36 to +42: maybe something like:

    if idx in [0, 1, 2]:
        return -idx
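A self-contained sketch of the simplification proposed above, with the index mapping pulled into a hypothetical helper so it can be run on its own (`_map_expect_index` is not part of the PR):

```python
# Hypothetical helper illustrating the suggested `return -idx` collapse:
# dut.expect() match index 0/1/2 maps to return codes 0/-1/-2.
def _map_expect_index(idx: int) -> int:
    if idx in (0, 1, 2):
        return -idx
    return 0


assert _map_expect_index(0) == 0   # shell prompt seen: clean succeeded
assert _map_expect_index(1) == -1  # remove_dir_recursive error reported
assert _map_expect_index(2) == -2  # pexpect.TIMEOUT while cleaning
```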
    else:
        return 0

Review comment: Let's place there sample output from fs_mark like it's done for mbedtls - it will be much easier to analyze the harness then, something like: …
Reply: I tried to add it like in mbedtls but flake8 complains about an unused variable.
Reply: I would still include a sample output, for example, in the form of: …
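One way to keep a sample-output reference in the harness without tripping flake8's unused-variable warning, as discussed above, is to store it as a comment block rather than assigning it to a variable (a sketch; the exact placement and wording are up to the author):

```python
# Sketch: reference fs_mark output kept as comments (no variable, so flake8 stays quiet).
# The columns below are what MSG_LINE matches; the values come from the sample run quoted
# in the review discussion above.
#
# FSUse%  Count  Size  Files/sec  App Overhead  CREAT(Min/Avg/Max)    ...  UNLINK(Min/Avg/Max)
#     27     10     0        1.7        181866  358108 547925 712653  ...  0 0 0
```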
def harness(dut: Dut, ctx: TestContext, result: TestResult):

Review comment: you could specify what the function should return.

    target = trunner.ctx.target.name

Review comment: Shouldn't …
    test_status = Status.OK

    test_name = None
    test_msg = ''

    loop_start = None
    loop_end = None
    loop_time = 1200
    first_loop_done = False

    NAME = r".+?# (?P<name>(/bin/)?fs_mark.+?)\r+\n"
    MSG_LINE = r"(?P<line>(\s+\d+){3}.+?)\r+\n"
Review comment: Hmm, we could split the line even within the regex like …

Reply: Such regex would be too time consuming and …

Reply: Could you clarify whether there have been any issues with regex on any specific target? From my current testing on ia32-generic-qemu, I've managed to capture regex results into groups smoothly, both under no load and under stress. However, I haven't tested this extensively. My regex:

    TYPES = [
        "f_use",
        "count",
        "size",
        "files_sec",
        "app_overhead",
        "creatMin",
        "creatAbg",
        "creatMax",
        "writeMin",
        "writeAbg",
        "writeMax",
        "fsyncMin",
        "fsyncAbg",
        "fsyncMax",
        "syncMin",
        "syncAbg",
        "syncMax",
        "closeMin",
        "closeAbg",
        "closeMax",
        "unlinkMin",
        "unlinkAbg",
        "unlinkMax",
    ]
    MSG_LINE = "".join([rf"(?P<{type}>-?\d+\.?\d*)\s+" for type in TYPES]) + r"\r?\n"

Code on which that was tested:

    while True:
        if loop_start and loop_end:
            loop_time = 3 * (loop_end - loop_start)
            loop_start = None
            loop_end = None
        idx = dut.expect([NAME, MSG_LINE, NO_SPC, END, NO_CONT_BLOCK, ERR], timeout=60)
        parsed = dut.match.groupdict()
        print(parsed)
        if idx == 0:
            test_name = parsed["name"]
            loop_start = time.time()
        elif idx == 1:
            pass

Output:

    (psh)% /bin/fs_mark -d /fs_mark_test -s 0 -n 10 -v -S 0 -L 10
    # /bin/fs_mark -d /fs_mark_test -s 0 -n 10 -v -S 0 -L 10
    {'name': '/bin/fs_mark -d /fs_mark_test -s 0 -n 10 -v -S 0 -L 10 '}
    # Version 3.3, 1 thread(s) starting at Thu Jan 1 00:00:08 1970
    # Sync method: NO SYNC: Test does not issue sync() or fsync() calls.
    # Directories: no subdirectories used
    # File names: 40 bytes long, (16 initial bytes of time stamp with 24 random bytes at end of name)
    # Files info: size 0 bytes, written with an IO size of 16384 bytes per write
    # App overhead is time in microseconds spent in the test not doing file writing related system calls.
    # All system call times are reported in microseconds.
    FSUse% Count Size Files/sec App Overhead CREAT (Min/Avg/Max) WRITE (Min/Avg/Max) FSYNC (Min/Avg/Max) SYNC (Min/Avg/Max) CLOSE (Min/Avg/Max) UNLINK (Min/Avg/Max)
    27 10 0 1.7 181866 358108 547925 712653 1601 3120 6763 0 0 0 0 0 0 1404 3829 15385 0 0 0
    {'f_use': '27', 'count': '10', 'size': '0', 'files_sec': '1.7', 'app_overhead': '181866', 'creatMin': '358108', 'creatAbg': '547925', 'creatMax': '712653', 'writeMin': '1601', 'writeAbg': '3120', 'writeMax': '6763', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1404', 'closeAbg': '3829', 'closeMax': '15385', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    27 20 0 1.6 237227 419356 590063 842584 1844 3814 7751 0 0 0 0 0 0 1814 4079 9268 0 0 0
    27 30 0 1.4 213842 451503 674516 982013 1746 5525 15782 0 0 0 0 0 0 1719 5564 20754 0 0 0
    27 40 0 1.5 172467 517018 643414 737076 2138 3036 5709 0 0 0 0 0 0 1981 5020 10241 0 0 0
    27 50 0 1.7 156668 501367 577613 700207 2071 3411 6560 0 0 0 0 0 0 1709 2290 3061 0 0 0
    28 60 0 1.5 216290 546834 630984 836860 1656 3969 10902 0 0 0 0 0 0 1937 3399 6637 0 0 0
    28 70 0 1.5 192122 431973 632832 793497 1250 3366 12533 0 0 0 0 0 0 1438 2688 5080 0 0 0
    28 80 0 1.6 139242 479825 622701 775921 1730 4523 10685 0 0 0 0 0 0 1837 3346 9047 0 0 0
    {'f_use': '27', 'count': '20', 'size': '0', 'files_sec': '1.6', 'app_overhead': '237227', 'creatMin': '419356', 'creatAbg': '590063', 'creatMax': '842584', 'writeMin': '1844', 'writeAbg': '3814', 'writeMax': '7751', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1814', 'closeAbg': '4079', 'closeMax': '9268', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    {'f_use': '27', 'count': '30', 'size': '0', 'files_sec': '1.4', 'app_overhead': '213842', 'creatMin': '451503', 'creatAbg': '674516', 'creatMax': '982013', 'writeMin': '1746', 'writeAbg': '5525', 'writeMax': '15782', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1719', 'closeAbg': '5564', 'closeMax': '20754', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    {'f_use': '27', 'count': '40', 'size': '0', 'files_sec': '1.5', 'app_overhead': '172467', 'creatMin': '517018', 'creatAbg': '643414', 'creatMax': '737076', 'writeMin': '2138', 'writeAbg': '3036', 'writeMax': '5709', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1981', 'closeAbg': '5020', 'closeMax': '10241', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    {'f_use': '27', 'count': '50', 'size': '0', 'files_sec': '1.7', 'app_overhead': '156668', 'creatMin': '501367', 'creatAbg': '577613', 'creatMax': '700207', 'writeMin': '2071', 'writeAbg': '3411', 'writeMax': '6560', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1709', 'closeAbg': '2290', 'closeMax': '3061', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    {'f_use': '28', 'count': '60', 'size': '0', 'files_sec': '1.5', 'app_overhead': '216290', 'creatMin': '546834', 'creatAbg': '630984', 'creatMax': '836860', 'writeMin': '1656', 'writeAbg': '3969', 'writeMax': '10902', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1937', 'closeAbg': '3399', 'closeMax': '6637', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    {'f_use': '28', 'count': '70', 'size': '0', 'files_sec': '1.5', 'app_overhead': '192122', 'creatMin': '431973', 'creatAbg': '632832', 'creatMax': '793497', 'writeMin': '1250', 'writeAbg': '3366', 'writeMax': '12533', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1438', 'closeAbg': '2688', 'closeMax': '5080', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    {'f_use': '28', 'count': '80', 'size': '0', 'files_sec': '1.6', 'app_overhead': '139242', 'creatMin': '479825', 'creatAbg': '622701', 'creatMax': '775921', 'writeMin': '1730', 'writeAbg': '4523', 'writeMax': '10685', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1837', 'closeAbg': '3346', 'closeMax': '9047', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    28 90 0 1.5 225379 418652 633652 1053823 1616 3947 18033 0 0 0 0 0 0 2179 4394 10715 0 0 0
    {'f_use': '28', 'count': '90', 'size': '0', 'files_sec': '1.5', 'app_overhead': '225379', 'creatMin': '418652', 'creatAbg': '633652', 'creatMax': '1053823', 'writeMin': '1616', 'writeAbg': '3947', 'writeMax': '18033', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '2179', 'closeAbg': '4394', 'closeMax': '10715', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    28 100 0 1.2 226891 477213 774066 1200110 1676 3605 8894 0 0 0 0 0 0 1573 3729 10540 0 0 0
    {'f_use': '28', 'count': '100', 'size': '0', 'files_sec': '1.2', 'app_overhead': '226891', 'creatMin': '477213', 'creatAbg': '774066', 'creatMax': '1200110', 'writeMin': '1676', 'writeAbg': '3605', 'writeMax': '8894', 'fsyncMin': '0', 'fsyncAbg': '0', 'fsyncMax': '0', 'syncMin': '0', 'syncAbg': '0', 'syncMax': '0', 'closeMin': '1573', 'closeAbg': '3729', 'closeMax': '10540', 'unlinkMin': '0', 'unlinkAbg': '0', 'unlinkMax': '0'}
    Average Files/sec: 1.0
    {}

Reply: The Python harness is being run on the host - I assume you've been testing on the target. Regex expressions are hard on CPU (if you don't construct them carefully you could even have O(2^n) complexity!) - so a rule of thumb is to avoid them if they're not necessary (and better use simple ones when not). Please note that intensive regex matching might impact the test total time being measured (not much, but still). The current approach with splitting the line is better CPU-wise, and could probably still be optimized in regards to Python syntax, e.g.:

    TYPES = [ "f_use", "count", "size", "files_sec", "app_overhead", "creatMin", "creatAbg", "creatMax", "writeMin", "writeAbg", "writeMax", "fsyncMin", "fsyncAbg", "fsyncMax", "syncMin", "syncAbg", "syncMax", "closeMin", "closeAbg", "closeMax", "unlinkMin", "unlinkAbg", "unlinkMax", ]
    # NOTE: used `float` as files_sec is a float number, doesn't really harm other stats
    line_dict = dict(zip(TYPES, map(float, line.split())))
    # if you need partial-only line_dict - use list splitting, eg:
    line_dict = dict(zip(TYPES[20:22], map(float, line.split()[20:22])))
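To make the zip-based idea above concrete, here is a standalone sketch that parses the first data line from the sample output quoted earlier (the `Avg` spelling follows the harness below rather than the reviewer's `Abg`):

```python
# Standalone sketch of the dict(zip(...)) parsing suggested in the review thread.
TYPES = [
    "f_use", "count", "size", "files_sec", "app_overhead",
    "creatMin", "creatAvg", "creatMax", "writeMin", "writeAvg", "writeMax",
    "fsyncMin", "fsyncAvg", "fsyncMax", "syncMin", "syncAvg", "syncMax",
    "closeMin", "closeAvg", "closeMax", "unlinkMin", "unlinkAvg", "unlinkMax",
]

# First data line from the sample run above.
line = "27 10 0 1.7 181866 358108 547925 712653 1601 3120 6763 0 0 0 0 0 0 1404 3829 15385 0 0 0"

# float() is used because Files/sec is fractional; the other columns are whole numbers.
line_dict = dict(zip(TYPES, map(float, line.split())))
print(line_dict["files_sec"], line_dict["creatMax"])  # 1.7 712653.0
```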
    NO_SPC = r"Insufficient free space.+?\r+\n"
    END = r"Average Files/sec:.+?\r+\n"
    ERR = r".+?(?P<err>EIO|ENOSPC|ENOMEM).+?"
    NO_CONT_BLOCK = r"(?P<msg>Lack of contiguous memory block of size \d+ bytes.+?)\r+\n"

    while True:
        if loop_start and loop_end:
            loop_time = 3 * (loop_end - loop_start)
            loop_start = None
            loop_end = None

        try:
            idx = dut.expect([
                NAME,
                MSG_LINE,
                NO_SPC,
                END,
                NO_CONT_BLOCK,
                ERR
            ], timeout=loop_time)
            parsed = dut.match.groupdict()

        except pexpect.TIMEOUT:
            test_msg = 'Got timeout, probably fs hang-up'
            test_status = Status.FAIL
            break

        if idx == 0:
            test_name = parsed["name"]
            loop_start = time.time()
        elif idx == 1:
            first_loop_done = True
            splitted_line = parsed["line"].split()
            f_use = splitted_line[0]
            count = splitted_line[1]
            size = splitted_line[2]
            files_sec = splitted_line[3]
            app_overhead = splitted_line[4]

            line_dict = {}
            line_dict['creatMin'] = splitted_line[5]
            line_dict['creatAvg'] = splitted_line[6]
            line_dict['creatMax'] = splitted_line[7]
            line_dict['writeMin'] = splitted_line[8]
            line_dict['writeAvg'] = splitted_line[9]
            line_dict['writeMax'] = splitted_line[10]
            line_dict['closeMin'] = splitted_line[17]
            line_dict['closeAvg'] = splitted_line[18]
            line_dict['closeMax'] = splitted_line[19]

            # If files are not kept, fs_mark unlinks them, so the unlink time can be measured
            if not ('-k' in test_name or '-L' in test_name or '-F' in test_name):
                line_dict['unlinkMin'] = splitted_line[20]
                line_dict['unlinkAvg'] = splitted_line[21]
                line_dict['unlinkMax'] = splitted_line[22]
Review comment on lines +112 to +115: I would drop checking which args were used and just check whether …
Reply: I think it is more readable and clean right now.
            for name, value in line_dict.items():
                if not t_data.timing_dict[(target, name)][0] <= int(value) <= t_data.timing_dict[(target, name)][1]:
                    test_status = Status.FAIL
                    test_msg += ''.join(('\n\t', name, ' exec time - ', value, ' out of range [',
                                         str(t_data.timing_dict[(target, name)][0]), ' - ',
                                         str(t_data.timing_dict[(target, name)][1]), ']'))
Review comment on lines +118 to +123: Maybe this approach will be more readable?: …
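The suggested change itself was not captured in this excerpt; purely as an illustration of one more readable shape (my assumption, not the reviewer's actual proposal), the bounds lookup and comparison could be factored into a small helper:

```python
# Hypothetical helper (illustration only, not the PR's code): report a value
# that falls outside the [low, high] range, mirroring the check above.
def check_range(name: str, value: str, low: int, high: int) -> str:
    if low <= int(value) <= high:
        return ""
    return f"\n\t{name} exec time - {value} out of range [{low} - {high}]"


assert check_range("creatMax", "712653", 0, 1_000_000) == ""
assert check_range("creatMax", "712653", 0, 500_000) != ""
```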
            if test_status == Status.FAIL:
                test_msg += "\n\n\tF_Use%: " + str(f_use)
                test_msg += "\n\tCount: " + str(count)
                test_msg += "\n\tSize: " + str(size)
                test_msg += "\n\tFiles/sec: " + str(files_sec)
                test_msg += "\n\tApp overhead: " + str(app_overhead) + "\n\t"
Review comment on lines +126 to +130: What do you think about this?: …
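That suggestion was also not captured; one possible variant (an illustrative assumption, not the reviewer's text) is to build the summary with a single f-string instead of repeated concatenation:

```python
# Hypothetical alternative to the concatenation above (illustration only),
# using sample values from the run quoted earlier in the review thread.
f_use, count, size, files_sec, app_overhead = "27", "10", "0", "1.7", "181866"

test_msg = (
    f"\n\n\tF_Use%: {f_use}"
    f"\n\tCount: {count}"
    f"\n\tSize: {size}"
    f"\n\tFiles/sec: {files_sec}"
    f"\n\tApp overhead: {app_overhead}\n\t"
)
print(test_msg)
```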
            break

        elif idx in [2, 3, 4]:
            # Tests have to run at least 1 loop
            if not first_loop_done:
                test_status = Status.FAIL
            if idx == 2:
                test_msg = 'Insufficient free space'
            elif idx == 3:
                loop_end = time.time()
                test_msg = 'Got no timings'
            elif idx == 4:
                test_msg = parsed['msg']

            break
        elif idx == 5:
            test_msg = parsed['err']
            test_status = Status.FAIL
            break

    ret = clean(dut, test_name, ctx)
    if ret < 0 and first_loop_done:
        test_status = Status.FAIL
        if ret == -1:
            test_msg = 'Error while cleaning'
        elif ret == -2:
            test_msg = 'Timeout during cleaning'

    return TestResult(msg=test_msg, status=test_status)
@@ -0,0 +1,132 @@
# This yaml contains armv7a7-imx6ull-evk specific fs_mark tests
test:

  type: harness
  harness: fs_mark.py
Review comment: As we discussed I would consider adding some nested cases to provide better coverage.
Reply: These tests are not straightforward to implement; maybe it is better to add them later on.
  # nightly: true

  targets:
    value: [armv7a7-imx6ull-evk]

  # TODO: full fs filling tests
Review comment: Are there any specific problems with such types of test? If there are any issues, it might be worth mentioning them. If not, I would leave it as it is.
Reply: I will add an issue about it.
Reply: Is it already added?
  tests:
    # empty files
    - name: fs_mark_emptyFiles
      execute: fs_mark -d /fs_mark_test -s 0 -n 100 -v -S 0 -L 50

    - name: fs_mark_emptyFiles_5_dirs
      execute: fs_mark -d /fs_mark_test -D 5 -N 20 -s 0 -n 100 -v -S 0 -L 50

    - name: fs_mark_emptyFiles_100_dirs
      execute: fs_mark -d /fs_mark_test -D 100 -N 1 -s 0 -n 100 -v -S 0 -L 50

    - name: fs_mark_emptyFiles_5_threads
      execute: fs_mark -d /fs_mark_test -t 5 -D 4 -N 20 -s 0 -n 100 -v -S 0 -L 50

    - name: fs_mark_emptyFiles_64_threads
      execute: fs_mark -d /fs_mark_test -t 64 -D 64 -N 1 -s 0 -n 64 -v -S 0 -L 4

    # big file
    - name: fs_mark_bigFile_allignedWrite
      execute: fs_mark -d /fs_mark_test -s 2000000 -w 32 -n 1 -v -S 0

    - name: fs_mark_bigFile_notAllignedWrite
      execute: fs_mark -d /fs_mark_test -s 2000000 -w 113 -n 1 -v -S 0

    - name: fs_mark_bigFile_bigWrite
      execute: fs_mark -d /fs_mark_test -s 2000000 -w 1024 -n 1 -v -S 0

    # s=256 w=32
    - name: fs_mark_smallFiles_alignedWrite
      execute: fs_mark -d /fs_mark_test -s 256 -w 32 -n 100 -v -S 0 -L 30

    - name: fs_mark_smallFiles_alignedWrite_5_dirs
      execute: fs_mark -d /fs_mark_test -D 5 -N 20 -s 256 -w 32 -n 100 -v -S 0 -L 30

    - name: fs_mark_smallFiles_alignedWrite_100_dirs
      execute: fs_mark -d /fs_mark_test -D 100 -N 1 -s 256 -w 32 -n 100 -v -S 0 -L 30

    - name: fs_mark_smallFiles_alignedWrite_5_threads
      execute: fs_mark -d /fs_mark_test -t 5 -D 5 -N 20 -s 256 -w 32 -n 100 -v -S 0 -L 30

    - name: fs_mark_smallFiles_alignedWrite_64_threads
      execute: fs_mark -d /fs_mark_test -t 64 -D 64 -N 1 -s 256 -w 32 -n 64 -v -S 0 -L 4

    # s=4096 w=32
    - name: fs_mark_pageSizeFiles_alignedWrite
      execute: fs_mark -d /fs_mark_test -s 4096 -w 32 -n 10 -v -S 0 -L 30

    - name: fs_mark_pageSizeFiles_alignedWrite_5_dirs
      execute: fs_mark -d /fs_mark_test -D 5 -N 2 -s 4096 -w 32 -n 10 -v -S 0 -L 30

    - name: fs_mark_pageSizeFiles_alignedWrite_25_dirs
      execute: fs_mark -d /fs_mark_test -D 25 -N 1 -s 4096 -w 32 -n 25 -v -S 0 -L 30

    - name: fs_mark_pageSizeFiles_alignedWrite_5_threads
      execute: fs_mark -d /fs_mark_test -t 5 -D 5 -N 2 -s 4096 -w 32 -n 10 -v -S 0 -L 15

    - name: fs_mark_pageSizeFiles_alignedWrite_20_threads
      execute: fs_mark -d /fs_mark_test -t 20 -D 20 -N 1 -s 4096 -w 32 -n 20 -v -S 0 -L 5

    # s=10000 w=32
Review comment: to remove (?)
Reply: Same with the other places.
Reply: I think it can remain; this comment shows how these tests are organized. Full fs filling tests are not added for now due to the difficulty of checking how much space is left on jffs2.
- name: fs_mark_bigFiles_alignedWrite | ||||
execute: fs_mark -d /fs_mark_test -s 10000 -w 32 -n 10 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_bigFiles_alignedWrite_5_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 5 -N 2 -s 10000 -w 32 -n 10 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_bigFiles_alignedWrite_25_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 25 -N 1 -s 10000 -w 32 -n 25 -v -S 0 -L 10 | ||||
|
||||
- name: fs_mark_bigFiles_alignedWrite_5_threads | ||||
execute: fs_mark -d /fs_mark_test -t 5 -D 5 -N 2 -s 10000 -w 32 -n 10 -v -S 0 -L 10 | ||||
|
||||
- name: fs_mark_bigFiles_alignedWrite_10_threads | ||||
execute: fs_mark -d /fs_mark_test -t 10 -D 10 -N 1 -s 10000 -w 32 -n 10 -v -S 0 -L 5 | ||||
|
||||
# s=256 w=113 | ||||
- name: fs_mark_smallFiles_notAlignedWrite | ||||
execute: fs_mark -d /fs_mark_test -s 256 -w 113 -n 100 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_smallFiles_notAlignedWrite_5_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 5 -N 20 -s 256 -w 113 -n 100 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_smallFiles_notAlignedWrite_100_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 100 -N 1 -s 256 -w 113 -n 100 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_smallFiles_notAlignedWrite_5_threads | ||||
execute: fs_mark -d /fs_mark_test -t 5 -D 5 -N 20 -s 256 -w 113 -n 100 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_smallFiles_notAlignedWrite_64_threads | ||||
execute: fs_mark -d /fs_mark_test -t 64 -D 64 -N 1 -s 256 -w 113 -n 64 -v -S 0 -L 3 | ||||
|
||||
# # s=4096 w=113 | ||||
- name: fs_mark_pageSizeFiles_notAlignedWrite | ||||
execute: fs_mark -d /fs_mark_test -s 4096 -w 113 -n 10 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_pageSizeFiles_notAlignedWrite_5_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 5 -N 2 -s 4096 -w 113 -n 10 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_pageSizeFiles_notAlignedWrite_25_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 25 -N 1 -s 4096 -w 113 -n 25 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_pageSizeFiles_notAlignedWrite_5_threads | ||||
execute: fs_mark -d /fs_mark_test -t 5 -D 5 -N 2 -s 4096 -w 113 -n 10 -v -S 0 -L 15 | ||||
|
||||
- name: fs_mark_pageSizeFiles_notAlignedWrite_20_threads | ||||
execute: fs_mark -d /fs_mark_test -t 20 -D 20 -N 1 -s 4096 -w 113 -n 20 -v -S 0 -L 5 | ||||
|
||||
# # s=10000 w=113 | ||||
- name: fs_mark_bigFiles_notAlignedWrite | ||||
execute: fs_mark -d /fs_mark_test -s 10000 -w 113 -n 10 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_bigFiles_notAlignedWrite_5_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 5 -N 2 -s 10000 -w 113 -n 10 -v -S 0 -L 30 | ||||
|
||||
- name: fs_mark_bigFiles_notAlignedWrite_25_dirs | ||||
execute: fs_mark -d /fs_mark_test -D 25 -N 1 -s 10000 -w 113 -n 25 -v -S 0 -L 20 | ||||
|
||||
- name: fs_mark_bigFiles_notAlignedWrite_5_threads | ||||
execute: fs_mark -d /fs_mark_test -t 5 -D 5 -N 2 -s 10000 -w 113 -n 10 -v -S 0 -L 10 | ||||
|
||||
- name: fs_mark_bigFiles_notAlignedWrite_10_threads | ||||
execute: fs_mark -d /fs_mark_test -t 10 -D 10 -N 1 -s 10000 -w 113 -n 10 -v -S 0 -L 5 |
Review comment: I'm considering whether it might be better to rename the macro, as it's not actually a test after all. However, we could discuss this during something like a repository cleanup/organization, not directly here.