From abb473ce7b3593aad51d3162941b5d977d8ad31f Mon Sep 17 00:00:00 2001
From: wang_zyuan <12333533+love-in-the-holy-land@user.noreply.gitee.com>
Date: Fri, 18 Jul 2025 07:05:54 +0800
Subject: [PATCH 1/7] Added instance oe_test_oeAware_61.sh
---
suite2cases/oeaware_temp.json | 8 +++
.../oeaware/oe_test_oeAware_61.sh | 59 +++++++++++++++++++
2 files changed, 67 insertions(+)
create mode 100644 suite2cases/oeaware_temp.json
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_61.sh
diff --git a/suite2cases/oeaware_temp.json b/suite2cases/oeaware_temp.json
new file mode 100644
index 000000000..478280a00
--- /dev/null
+++ b/suite2cases/oeaware_temp.json
@@ -0,0 +1,8 @@
+{
+ "path": "$OET_PATH/testcases/feature-test/oeaware",
+ "cases": [
+ {
+ "name": "oe_test_oeAware_61"
+ }
+ ]
+}
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_61.sh b/testcases/feature-test/oeaware/oe_test_oeAware_61.sh
new file mode 100644
index 000000000..f092b6573
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_61.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/libs/locallibs/common_lib.sh
+
+function pre_test(){
+ script_name=$(basename "$0")
+ LOG_INFO "Start to prepare test environment for ${script_name}"
+ if /etc/oeAware/analysis_config.yaml["${NODE1_FRAME}"="x86_64"];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment"
+}
+function run_test(){
+ LOG_INFO "Start to run the test."
+ yaml_dir=/etc/oeAware/analysis_config.yaml
+ # default threshold
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ # change host threshold
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/100/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/-1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:host_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/101/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:host_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/45/' $yaml_dir
+ # change docker threshold
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/100/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/-1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:docker_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/101/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:docker_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ LOG_INFO "End to run the test."
+}
+function post_test(){
+ LOG_INFO "Start to restore the test environment"
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment"
+}
+
+main "$@"
--
Gitee
From 85d0c9a8aded64323e30149a7017b1a275fe043f Mon Sep 17 00:00:00 2001
From: 13906515865
Date: Thu, 24 Jul 2025 10:47:43 +0800
Subject: [PATCH 2/7] Added oeaware_test 67,72~76,81~83,87
---
suite2cases/oeaware_temp.json | 2 +-
temp_txt/suggestion.txt | 18 +++++++
temp_txt/suggestion_new.txt | 18 +++++++
testcases/feature-test/oeaware/numafast | 0
.../oeaware/oe_test_oeAware_67.sh | 24 +++++++++
.../oeaware/oe_test_oeAware_72.sh | 31 +++++++++++
.../oeaware/oe_test_oeAware_73.sh | 32 ++++++++++++
.../oeaware/oe_test_oeAware_74.sh | 35 +++++++++++++
.../oeaware/oe_test_oeAware_75.sh | 34 +++++++++++++
.../oeaware/oe_test_oeAware_76.sh | 45 ++++++++++++++++
.../oeaware/oe_test_oeAware_81.sh | 34 +++++++++++++
.../oeaware/oe_test_oeAware_82.sh | 38 ++++++++++++++
.../oeaware/oe_test_oeAware_83.sh | 35 +++++++++++++
.../oeaware/oe_test_oeAware_87.sh | 51 +++++++++++++++++++
14 files changed, 396 insertions(+), 1 deletion(-)
create mode 100644 temp_txt/suggestion.txt
create mode 100644 temp_txt/suggestion_new.txt
create mode 100644 testcases/feature-test/oeaware/numafast
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_67.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_72.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_73.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_74.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_75.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_76.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_81.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_82.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_83.sh
create mode 100644 testcases/feature-test/oeaware/oe_test_oeAware_87.sh
diff --git a/suite2cases/oeaware_temp.json b/suite2cases/oeaware_temp.json
index 478280a00..8315f6d16 100644
--- a/suite2cases/oeaware_temp.json
+++ b/suite2cases/oeaware_temp.json
@@ -2,7 +2,7 @@
"path": "$OET_PATH/testcases/feature-test/oeaware",
"cases": [
{
- "name": "oe_test_oeAware_61"
+ "name": "oe_test_oeAware_73"
}
]
}
diff --git a/temp_txt/suggestion.txt b/temp_txt/suggestion.txt
new file mode 100644
index 000000000..e84345fc3
--- /dev/null
+++ b/temp_txt/suggestion.txt
@@ -0,0 +1,18 @@
+==================================================Analysis Suggestion===================================================
++---------------------+---------------------+---------------------+
+|suggestion |operation |result |
++---------------------+---------------------+---------------------+
+|Use numafast |step 1: if `oeaware |Optimizes memory acc |
+| |ctl -q | grep tune_n |ess locality with lo |
+| |uma_mem_access` not |w scheduling overhea |
+| |exist, install numaf |d |
+| |ast | |
+| |install : `oeawarect | |
+| |l -i numafast` | |
+| |load : `oeawarect | |
+| |l -l libtune_numa.so | |
+| |` | |
+| |step 2: enable insta | |
+| |nce `oeaware -e tune | |
+| |_numa_mem_access` | |
++---------------------+---------------------+---------------------+
diff --git a/temp_txt/suggestion_new.txt b/temp_txt/suggestion_new.txt
new file mode 100644
index 000000000..e84345fc3
--- /dev/null
+++ b/temp_txt/suggestion_new.txt
@@ -0,0 +1,18 @@
+==================================================Analysis Suggestion===================================================
++---------------------+---------------------+---------------------+
+|suggestion |operation |result |
++---------------------+---------------------+---------------------+
+|Use numafast |step 1: if `oeaware |Optimizes memory acc |
+| |ctl -q | grep tune_n |ess locality with lo |
+| |uma_mem_access` not |w scheduling overhea |
+| |exist, install numaf |d |
+| |ast | |
+| |install : `oeawarect | |
+| |l -i numafast` | |
+| |load : `oeawarect | |
+| |l -l libtune_numa.so | |
+| |` | |
+| |step 2: enable insta | |
+| |nce `oeaware -e tune | |
+| |_numa_mem_access` | |
++---------------------+---------------------+---------------------+
diff --git a/testcases/feature-test/oeaware/numafast b/testcases/feature-test/oeaware/numafast
new file mode 100644
index 000000000..e69de29bb
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_67.sh b/testcases/feature-test/oeaware/oe_test_oeAware_67.sh
new file mode 100644
index 000000000..85c60696f
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_67.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ : # TODO: preparation steps (a bare 'cp' with no operands fails here)
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_72.sh b/testcases/feature-test/oeaware/oe_test_oeAware_72.sh
new file mode 100644
index 000000000..26650026d
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_72.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ # cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 0 "Check SMT suggestion failed."
+ local cpu_count=$(lscpu | grep -w 'CPU:' | awk '{print $2}')
+ local thresh=$(awk '/dynamic_smt:/ {getline; print $2}' /etc/oeAware/analysis_config.yaml)
+ stress-ng --cpu $(( cpu_count*(thresh+3)/100 )) -t 30 &
+ # stress-ng --cpu $cpu_count -t 30 &
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 1 "Check SMT suggestion failed."
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_73.sh b/testcases/feature-test/oeaware/oe_test_oeAware_73.sh
new file mode 100644
index 000000000..574493762
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_73.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ local cpu_count=$(lscpu | grep -w 'CPU:' | awk '{print $2}')
+ local thresh=70
+ sed -i "/dynamic_smt:/ { n; s/\( *threshold:\)[[:space:]]*.*/ threshold: ${thresh}/; }" /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 0 "Check SMT suggestion failed."
+ stress-ng --cpu $(( cpu_count*(thresh+3)/100 )) -t 30 &
+ # stress-ng --cpu $cpu_count -t 30 &
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 1 "Check SMT suggestion failed."
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_74.sh b/testcases/feature-test/oeaware/oe_test_oeAware_74.sh
new file mode 100644
index 000000000..5c02a4f2e
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_74.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/0/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/100/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/-1/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/101/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_75.sh b/testcases/feature-test/oeaware/oe_test_oeAware_75.sh
new file mode 100644
index 000000000..dfc605931
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_75.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\1IllegalInput / }' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\11i45lA\& / }' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\1$#\{\]?\> / }' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_76.sh b/testcases/feature-test/oeaware/oe_test_oeAware_76.sh
new file mode 100644
index 000000000..daccb08de
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_76.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ temp_dir=$(mktemp -d)
+ mkdir "$temp_dir/low/"
+ mkdir "$temp_dir/high/"
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ local test_rounds=5
+ local flag=0
+
+ oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/low/suggestion_1.txt"
+ for ((i=2;i<=test_rounds;i++)); do
+ oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/low/suggestion_$i.txt"
+ diff "$temp_dir/low/suggestion_$((i-1)).txt" "$temp_dir/low/suggestion_$i.txt"
+ CHECK_RESULT $? 0 0 "Check repeated analysis suggestion failed"
+ done
+
+ stress-ng --cpu 6 -t 60 &
+ SLEEP 5
+ oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/high/suggestion_1.txt"
+ for ((i=2;i<=test_rounds;i++)); do
+ oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/high/suggestion_$i.txt"
+ diff "$temp_dir/high/suggestion_$((i-1)).txt" "$temp_dir/high/suggestion_$i.txt"
+ CHECK_RESULT $? 0 0 "Check repeated analysis suggestion failed"
+ done
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+ rm -rf ${temp_dir}
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_81.sh b/testcases/feature-test/oeaware/oe_test_oeAware_81.sh
new file mode 100644
index 000000000..d2a8c400a
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_81.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ oeawarectl -e dynamic_smt_tune -threshold 0
+ grep -q "^0" /proc/sys/kernel/sched_util_ratio
+ CHECK_RESULT $? 0 0 "Check SMT by threshold failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT by threshold failed"
+ oeawarectl -e dynamic_smt_tune -threshold -1 2>&1 | grep "Instance enabled failed, because the threshold range is \[0, 100\], but is -1"
+ CHECK_RESULT $? 0 0 "Check SMT beyond threshold failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ oeawarectl -e dynamic_smt_tune -threshold 101 2>&1 | grep "Instance enabled failed, because the threshold range is \[0, 100\], but is 101"
+ CHECK_RESULT $? 0 0 "Check SMT beyond threshold failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_82.sh b/testcases/feature-test/oeaware/oe_test_oeAware_82.sh
new file mode 100644
index 000000000..8e9cec32a
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_82.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ oeawarectl -e dynamic_smt_tune | grep "Instance enabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ oeawarectl -e dynamic_smt_tune -threshold "IllegalInputs" | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ oeawarectl -e dynamic_smt_tune -threshold "%1!4@5\(IA?" | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ oeawarectl -e dynamic_smt_tune -threshold "@\>1\&:\}!?" | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ oeawarectl -e dynamic_smt_tune -threshold 191.801 | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_83.sh b/testcases/feature-test/oeaware/oe_test_oeAware_83.sh
new file mode 100644
index 000000000..711d4f745
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_83.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ oeawarectl -e dynamic_smt_tune | grep "Instance enabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -d dynamic_smt_tune
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -e dynamic_smt_tune -d 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ oeawarectl -e dynamic_smt_tune -i 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ oeawarectl -e dynamic_smt_tune -e 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ oeawarectl -e dynamic_smt_tune -threshold_test 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_87.sh b/testcases/feature-test/oeaware/oe_test_oeAware_87.sh
new file mode 100644
index 000000000..256a96540
--- /dev/null
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_87.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/bash
+
+source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
+
+function pre_test() {
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ LOG_INFO "End to prepare the test environment."
+}
+
+function run_test() {
+ LOG_INFO "Start to run test."
+ oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "NO_PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+
+ echo PARAL > /sys/kernel/debug/sched_features
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ tail -n 10 /var/log/oeAware/server.log | grep "\[NUMA_SCHED\] numa sched is already enabled on system"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+
+ echo NO_PARAL > /sys/kernel/debug/sched_features
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "NO_PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ LOG_INFO "End to run test."
+}
+
+function post_test() {
+ LOG_INFO "Start to restore the test environment."
+ LOG_INFO "End to restore the test environment."
+}
+
+main "$@"
--
Gitee
From b87077c31098a9f739d3390775214713acbcdd9e Mon Sep 17 00:00:00 2001
From: 13906515865
Date: Thu, 24 Jul 2025 17:31:00 +0800
Subject: [PATCH 3/7] edited
---
suite2cases/oeaware.json | 31 ++++++
suite2cases/oeaware_temp.json | 27 +++++
.../oeaware/.oe_test_oeAware_87.sh.swp | Bin 0 -> 12288 bytes
.../oeaware/oe_test_oeAware_61.sh | 92 +++++++++---------
.../oeaware/oe_test_oeAware_72.sh | 37 +++----
.../oeaware/oe_test_oeAware_73.sh | 41 ++++----
.../oeaware/oe_test_oeAware_74.sh | 50 ++++++----
.../oeaware/oe_test_oeAware_75.sh | 46 +++++----
.../oeaware/oe_test_oeAware_76.sh | 64 ++++++------
.../oeaware/oe_test_oeAware_81.sh | 48 +++++----
.../oeaware/oe_test_oeAware_82.sh | 57 ++++++-----
.../oeaware/oe_test_oeAware_83.sh | 50 ++++++----
.../oeaware/oe_test_oeAware_87.sh | 80 ++++++++-------
testcases/feature-test/oeaware/test | 0
14 files changed, 371 insertions(+), 252 deletions(-)
create mode 100644 testcases/feature-test/oeaware/.oe_test_oeAware_87.sh.swp
create mode 100644 testcases/feature-test/oeaware/test
diff --git a/suite2cases/oeaware.json b/suite2cases/oeaware.json
index 655d39778..9139c6492 100644
--- a/suite2cases/oeaware.json
+++ b/suite2cases/oeaware.json
@@ -132,6 +132,37 @@
},
{
"name": "oe_test_oeAware_49"
+ },
+ {
+ "name": "oe_test_oeAware_61"
+ },
+ {
+ "name": "oe_test_oeAware_72"
+ },
+ {
+ "name": "oe_test_oeAware_73"
+ },
+ {
+ "name": "oe_test_oeAware_74"
+ },
+ {
+ "name": "oe_test_oeAware_75"
+ },
+ {
+ "name": "oe_test_oeAware_76"
+ },
+ {
+ "name": "oe_test_oeAware_81"
+ },
+ {
+ "name": "oe_test_oeAware_82"
+ },
+ {
+ "name": "oe_test_oeAware_83"
+ },
+ {
+ "name": "oe_test_oeAware_87"
}
+
]
}
diff --git a/suite2cases/oeaware_temp.json b/suite2cases/oeaware_temp.json
index 8315f6d16..f5a88866a 100644
--- a/suite2cases/oeaware_temp.json
+++ b/suite2cases/oeaware_temp.json
@@ -1,8 +1,35 @@
{
"path": "$OET_PATH/testcases/feature-test/oeaware",
"cases": [
+ {
+ "name": "oe_test_oeAware_61"
+ },
+ {
+ "name": "oe_test_oeAware_72"
+ },
{
"name": "oe_test_oeAware_73"
+ },
+ {
+ "name": "oe_test_oeAware_74"
+ },
+ {
+ "name": "oe_test_oeAware_75"
+ },
+ {
+ "name": "oe_test_oeAware_76"
+ },
+ {
+ "name": "oe_test_oeAware_81"
+ },
+ {
+ "name": "oe_test_oeAware_82"
+ },
+ {
+ "name": "oe_test_oeAware_83"
+ },
+ {
+ "name": "oe_test_oeAware_87"
}
]
}
diff --git a/testcases/feature-test/oeaware/.oe_test_oeAware_87.sh.swp b/testcases/feature-test/oeaware/.oe_test_oeAware_87.sh.swp
new file mode 100644
index 0000000000000000000000000000000000000000..875db05b310e8d9f2e01fa05ad32af0160e8cbe1
GIT binary patch
literal 12288
zcmeI2&u<$=6vwAX{3>lha7KFBL@FTJn>0La92()+D0V8OHd>8$$M&l08M8AR
z8rleP0&(EN6~wt*kT?VVCuo&_pb|nzRN_JpNPPE4;bL?4ZJL%F_((ImXW1eZP0(^c{Bn1IA*dOnQ4%i75EP-zyro6A7^a#2^f$6
z|L^|&f9F}oega>B>);cx0p`H3#~Aw&+ydOsssxly>C`Q$miUpY-z-JP2GmMq&3u($u_RLa!*;}iwHZD~2q3$22X?Se@z(}1$
z`X*G82B<#Eip5@fnXW$Eu0&?`6gDiLuP(j;E1K)I7EQfI)A(@~d&0j=L8y5<3T2mM
z(&yCSVI+dwa-SP=^_F%oi&%=t4MaQXy4vrFz$?nwG+m=^EC%G%8{V6hwMs32^wB0P
z%HezyQ7Opm@$%VBBE{ZJD^eRSeNmV$0@8`^3#~gz6m9);D<11|#pvY<2}g}{wZiWy
z+VvZq9qDVlcc49*YyVU%jaAtL2_%DUL?uZx)8uaQ*o{;dMV?u4r>jN0DPmNRcK1U1
z(#87vYQ<|p+oekz`A*CtpF&MMiUkk0aJ1z9$02}CX+(cqTkXJNFZ1)iDe!hT!?=_^
zkk~u8t@hUfX_#DxNpYCQ_7Y&6$kF+4fSiOv>fg)D`5sDhO`62hEL^0Y?37Eh_Da3k
zs?=)a6g#)n=aP3toJU6|%)Xc&iQ-CV=)~OI@J4#FI_XQnJ?{e)D4|IzgM2j@s5I?k
zVr1TYys$$Pc`hxzU8UpM-nh0KkjF|26RcE~AwaRLB+gWg~
z%%Ad3opd&a^X}un5pLM?DZY1)Pqj*7q<75OsJ8G{(K^5FjH;mbRll#Kn|E39{N#k2
O=-6$C(rt58)Bgr()^7a(
literal 0
HcmV?d00001
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_61.sh b/testcases/feature-test/oeaware/oe_test_oeAware_61.sh
index f092b6573..fcbf536b5 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_61.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_61.sh
@@ -3,57 +3,55 @@
source "${OET_PATH}"/libs/locallibs/common_lib.sh
function pre_test(){
- script_name=$(basename "$0")
- LOG_INFO "Start to prepare test environment for ${script_name}"
- if /etc/oeAware/analysis_config.yaml["${NODE1_FRAME}"="x86_64"];then
- echo "the environment does not support testing"
- exit 255
- fi
- DNF_INSTALL "oeAware-manager"
- systemctl restart oeaware
- cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
- LOG_INFO "End to prepare the test environment"
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ]; then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment"
}
function run_test(){
- LOG_INFO "Start to run the test."
- yaml_dir=/etc/oeAware/analysis_config.yaml
- # default threshold
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
- CHECK_RESULT $? 0 1 "Check docker threshold failed."
- # change host threshold
- sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/1/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
- CHECK_RESULT $? 0 1 "Check docker threshold failed."
- sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/100/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
- CHECK_RESULT $? 0 1 "Check docker threshold failed."
- sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/-1/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:host_cpu_usage_threshold"
- CHECK_RESULT $? 0 0 "Check docker threshold failed."
- sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/101/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:host_cpu_usage_threshold"
- CHECK_RESULT $? 0 0 "Check docker threshold failed."
- sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/45/' $yaml_dir
- # change docker threshold
- sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/1/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
- CHECK_RESULT $? 0 1 "Check docker threshold failed."
- sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/100/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
- CHECK_RESULT $? 0 1 "Check docker threshold failed."
- sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/-1/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:docker_cpu_usage_threshold"
- CHECK_RESULT $? 0 0 "Check docker threshold failed."
- sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/101/' $yaml_dir
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:docker_cpu_usage_threshold"
- CHECK_RESULT $? 0 0 "Check docker threshold failed."
- LOG_INFO "End to run the test."
+ LOG_INFO "Start to run the test."
+ yaml_dir=/etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/100/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/-1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:host_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/101/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:host_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ sed -i '/^ host_cpu_usage_threshold:/ s/[-?0-9]\+/45/' $yaml_dir
+
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/100/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:(host|docker)_cpu_usage_threshold"
+ CHECK_RESULT $? 0 1 "Check docker threshold failed."
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/-1/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:docker_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ sed -i '/^ docker_cpu_usage_threshold:/ s/[-?0-9]\+/101/' $yaml_dir
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn:.*docker_coordination_burst:docker_cpu_usage_threshold"
+ CHECK_RESULT $? 0 0 "Check docker threshold failed."
+ LOG_INFO "End to run the test."
}
function post_test(){
- LOG_INFO "Start to restore the test environment"
- mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
- DNF_REMOVE "$@"
- LOG_INFO "End to restore the test environment"
+ LOG_INFO "Start to restore the test environment"
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment"
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_72.sh b/testcases/feature-test/oeaware/oe_test_oeAware_72.sh
index 26650026d..6b705d71f 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_72.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_72.sh
@@ -3,29 +3,32 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
- # cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- oeawarectl analysis -t 3 | grep "use dynamic smt"
- CHECK_RESULT $? 0 0 "Check SMT suggestion failed."
- local cpu_count=$(lscpu | grep -w 'CPU:' | awk '{print $2}')
- local thresh=$(awk '/dynamic_smt:/ {getline; print $2}' /etc/oeAware/analysis_config.yaml)
- stress-ng --cpu $(( cpu_count*(thresh+3)/100 )) -t 30 &
- # stress-ng --cpu $cpu_count -t 30 &
- oeawarectl analysis -t 3 | grep "use dynamic smt"
- CHECK_RESULT $? 0 1 "Check SMT suggestion failed."
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 0 "Check SMT suggestion failed."
+ local cpu_count=$(lscpu | grep -w 'CPU:' | awk '{print $2}')
+ local thresh=$(awk '/dynamic_smt:/ {getline; print $2}' /etc/oeAware/analysis_config.yaml)
+ stress-ng --cpu $(( cpu_count*(thresh+3)/100 )) -t 30 &
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 1 "Check SMT suggestion failed."
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
-
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_73.sh b/testcases/feature-test/oeaware/oe_test_oeAware_73.sh
index 574493762..936dc1cde 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_73.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_73.sh
@@ -3,30 +3,35 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
- cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- local cpu_count=$(lscpu | grep -w 'CPU:' | awk '{print $2}')
- local thresh=70
- sed -i "/dynamic_smt:/ { n; s/\( *threshold:\)[[:space:]]*.*/ threshold: ${thresh}/; }" /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 | grep "use dynamic smt"
- CHECK_RESULT $? 0 0 "Check SMT suggestion failed."
- stress-ng --cpu $(( cpu_count*(thresh+3)/100 )) -t 30 &
- # stress-ng --cpu $cpu_count -t 30 &
- oeawarectl analysis -t 3 | grep "use dynamic smt"
- CHECK_RESULT $? 0 1 "Check SMT suggestion failed."
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ local cpu_count=$(lscpu | grep -w 'CPU:' | awk '{print $2}')
+ local thresh=70
+ sed -i "/dynamic_smt:/ { n; s/\( *threshold:\)[[:space:]]*.*/ threshold: ${thresh}/; }" /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 0 "Check SMT suggestion failed."
+ stress-ng --cpu $(( cpu_count*(thresh+3)/100 )) -t 30 &
+ oeawarectl analysis -t 3 | grep "use dynamic smt"
+ CHECK_RESULT $? 0 1 "Check SMT suggestion failed."
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
- mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_74.sh b/testcases/feature-test/oeaware/oe_test_oeAware_74.sh
index 5c02a4f2e..b8e9fd424 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_74.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_74.sh
@@ -3,33 +3,41 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
- cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- sed -i '/^ threshold:/ s/\([0-9.-]\+\)/0/' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:"
- CHECK_RESULT $? 0 1 "Check SMT threshold failed."
- sed -i '/^ threshold:/ s/\([0-9.-]\+\)/100/' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:"
- CHECK_RESULT $? 0 1 "Check SMT threshold failed."
- sed -i '/^ threshold:/ s/\([0-9.-]\+\)/-1/' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
- CHECK_RESULT $? 0 0 "Check SMT threshold failed."
- sed -i '/^ threshold:/ s/\([0-9.-]\+\)/101/' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
- CHECK_RESULT $? 0 0 "Check SMT threshold failed."
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ oeawarectl analysis -t 3 2>&1 | grep "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/0/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/100/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/-1/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ sed -i '/^ threshold:/ s/\([0-9.-]\+\)/101/' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\(-?[0-9]+\)' value must be \[0, 100\]"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
- mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_75.sh b/testcases/feature-test/oeaware/oe_test_oeAware_75.sh
index dfc605931..73c7ebde3 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_75.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_75.sh
@@ -3,32 +3,38 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
- cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ cp /etc/oeAware/analysis_config.yaml /etc/oeAware/analysis_config.yaml_bak
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn:"
- CHECK_RESULT $? 0 1 "Check SMT threshold failed."
- sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\1IllegalInput / }' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
- CHECK_RESULT $? 0 0 "Check SMT threshold failed."
- sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\11i45lA\& / }' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
- CHECK_RESULT $? 0 0 "Check SMT threshold failed."
- sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\1$#\{\]?\> / }' /etc/oeAware/analysis_config.yaml
- oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
- CHECK_RESULT $? 0 0 "Check SMT threshold failed."
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 1 "Check SMT threshold failed."
+ sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\1IllegalInput / }' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\11i45lA\& / }' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ sed -i '/^dynamic_smt:/,/^ /{ /threshold:/ s/\(threshold:\s*\)[^#]*\s*/\1$#\{\]?\> / }' /etc/oeAware/analysis_config.yaml
+ oeawarectl analysis -t 3 2>&1 | grep -E "Warn: analysis config 'dynamic_smt:threshold\([^)]+\)' value must be a number"
+ CHECK_RESULT $? 0 0 "Check SMT threshold failed."
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
- mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ mv /etc/oeAware/analysis_config.yaml_bak /etc/oeAware/analysis_config.yaml
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_76.sh b/testcases/feature-test/oeaware/oe_test_oeAware_76.sh
index daccb08de..6dafa643b 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_76.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_76.sh
@@ -3,43 +3,49 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
- temp_dir=$(mktemp -d)
- mkdir "$temp_dir/low/"
- mkdir "$temp_dir/high/"
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ temp_dir=$(mktemp -d)
+ mkdir "$temp_dir/low/"
+ mkdir "$temp_dir/high/"
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- local test_rounds=5
- local flag=0
+ LOG_INFO "Start to run test."
+ local test_rounds=5
+ local flag=0
- oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/low/suggestion_1.txt"
- for ((i=2;i<=test_rounds;i++)); do
-     oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/low/suggestion_$i.txt"
- diff "$temp_dir/low/suggestion_${i-1}.txt" "$temp_dir/low/suggestion_$i.txt"
- CHECK_RESULT $? 0 0 "Check repeated analysis suggestion failed"
- done
+ oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/low/suggestion_1.txt"
+ for ((i=2;i<=test_rounds;i++)); do
+     oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/low/suggestion_$i.txt"
+ diff "$temp_dir/low/suggestion_$((i-1)).txt" "$temp_dir/low/suggestion_$i.txt"
+ CHECK_RESULT $? 0 0 "Check repeated analysis suggestion failed"
+ done
- stress-ng --cpu 6 -t 60 &
- SLEEP 5
- oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/high/suggestion_1.txt"
- for ((i=2;i "$temp_dir/high/suggestion_$i.txt"
- diff "$temp_dir/high/suggestion_${i-1}.txt" "$temp_dir/high/suggestion_$i.txt"
- CHECK_RESULT $? 0 0 "Check repeated analysis suggestion failed"
- done
- LOG_INFO "End to run test."
+ stress-ng --cpu 6 -t 60 &
+ SLEEP 5
+ oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/high/suggestion_1.txt"
+ for ((i=2;i<=test_rounds;i++)); do
+     oeawarectl analysis -t 3 | grep -A 100 'Analysis Suggestion' | awk '/^$/ {exit} 1' > "$temp_dir/high/suggestion_$i.txt"
+ diff "$temp_dir/high/suggestion_$((i-1)).txt" "$temp_dir/high/suggestion_$i.txt"
+ CHECK_RESULT $? 0 0 "Check repeated analysis suggestion failed"
+ done
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
- rm -rf ${temp_dir}
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ rm -rf ${temp_dir}
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_81.sh b/testcases/feature-test/oeaware/oe_test_oeAware_81.sh
index d2a8c400a..5650a8cc5 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_81.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_81.sh
@@ -3,32 +3,40 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
-
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ if [ "$(cat /sys/devices/system/cpu/smt/active)" = "1" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- oeawarectl -e dynamic_smt_tune -threshold 0
- grep -q "^0" /proc/sys/kernel/sched_util_ratio
- CHECK_RESULT $? 0 0 "Check SMT by threshold failed"
- oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
- CHECK_RESULT $? 0 0 "Check SMT by threshold failed"
- oeawarectl -e dynamic_smt_tune -threshold -1 2>&1 | grep "Instance enabled failed, because the threshold range is \[0, 100\], but is -1"
- CHECK_RESULT $? 0 0 "Check SMT beyond threshold failed"
- oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
- oeawarectl -e dynamic_smt_tune -threshold 101 2>&1 | grep "Instance enabled failed, because the threshold range is \[0, 100\], but is 101"
- CHECK_RESULT $? 0 0 "Check SMT beyond threshold failed"
- oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ oeawarectl -e dynamic_smt_tune -threshold 0 | grep "Instance enabled successfully"
+ grep -q "^0" /proc/sys/kernel/sched_util_ratio
+ CHECK_RESULT $? 0 0 "Check SMT by threshold failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT by threshold failed"
+ oeawarectl -e dynamic_smt_tune -threshold -1 2>&1 | grep "Instance enabled failed, because the threshold range is \[0, 100\], but is -1"
+ CHECK_RESULT $? 0 0 "Check SMT beyond threshold failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ oeawarectl -e dynamic_smt_tune -threshold 101 2>&1 | grep "Instance enabled failed, because the threshold range is \[0, 100\], but is 101"
+ CHECK_RESULT $? 0 0 "Check SMT beyond threshold failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
-
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_82.sh b/testcases/feature-test/oeaware/oe_test_oeAware_82.sh
index 8e9cec32a..0d5cc3b54 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_82.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_82.sh
@@ -3,36 +3,45 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
-
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ systemctl restart oeaware
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ if [ "$(cat /sys/devices/system/cpu/smt/active)" = "1" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- oeawarectl -e dynamic_smt_tune | grep "Instance enabled successfully"
- CHECK_RESULT $? 0 0 "Check SMT failed"
- oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
- oeawarectl -e dynamic_smt_tune -threshold "IllegalInputs" | grep "Instance enabled failed, because threshold value is not a integer"
- CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
- oeawarectl -d dynamic_smt_tune
- oeawarectl -e dynamic_smt_tune -threshold "%1!4@5\(IA?" | grep "Instance enabled failed, because threshold value is not a integer"
- CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
- oeawarectl -d dynamic_smt_tune
- oeawarectl -e dynamic_smt_tune -threshold "@\>1\&:\}!?" | grep "Instance enabled failed, because threshold value is not a integer"
- CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
- oeawarectl -d dynamic_smt_tune
- oeawarectl -e dynamic_smt_tune -threshold 191.801 | grep "Instance enabled failed, because threshold value is not a integer"
- CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
- oeawarectl -d dynamic_smt_tune
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ oeawarectl -e dynamic_smt_tune | grep "Instance enabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -e dynamic_smt_tune -threshold "IllegalInputs" | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ oeawarectl -e dynamic_smt_tune -threshold "%1!4@5\(IA?" | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ oeawarectl -e dynamic_smt_tune -threshold "@\>1\&:\}!?" | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ oeawarectl -e dynamic_smt_tune -threshold 191.801 | grep "Instance enabled failed, because threshold value is not a integer"
+ CHECK_RESULT $? 0 0 "Check SMT invalid threshold failed"
+ oeawarectl -d dynamic_smt_tune
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
-
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_83.sh b/testcases/feature-test/oeaware/oe_test_oeAware_83.sh
index 711d4f745..b252434f7 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_83.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_83.sh
@@ -3,33 +3,41 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
-
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ if [ "$(cat /sys/devices/system/cpu/smt/active)" = "1" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- oeawarectl -e dynamic_smt_tune | grep "Instance enabled successfully"
- CHECK_RESULT $? 0 0 "Check SMT failed"
- oeawarectl -d dynamic_smt_tune
- CHECK_RESULT $? 0 0 "Check SMT failed"
- oeawarectl -e dynamic_smt_tune -d 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
- CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
- oeawarectl -e dynamic_smt_tune -i 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
- CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
- oeawarectl -e dynamic_smt_tune -e 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
- CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
- oeawarectl -e dynamic_smt_tune -threshold_test 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
- CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
- LOG_INFO "End to run test."
+ LOG_INFO "Start to run test."
+ oeawarectl -e dynamic_smt_tune | grep "Instance enabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -d dynamic_smt_tune | grep "Instance disabled successfully"
+ CHECK_RESULT $? 0 0 "Check SMT failed"
+ oeawarectl -e dynamic_smt_tune -d 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ oeawarectl -e dynamic_smt_tune -i 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ oeawarectl -e dynamic_smt_tune -e 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ oeawarectl -e dynamic_smt_tune -threshold_test 1 | grep -E 'Instance enabled failed, because params .*? invalid.'
+ CHECK_RESULT $? 0 0 "Check SMT invalid params failed"
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
-
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/oe_test_oeAware_87.sh b/testcases/feature-test/oeaware/oe_test_oeAware_87.sh
index 256a96540..e0d08b9d7 100644
--- a/testcases/feature-test/oeaware/oe_test_oeAware_87.sh
+++ b/testcases/feature-test/oeaware/oe_test_oeAware_87.sh
@@ -3,49 +3,59 @@
source "${OET_PATH}"/testcases/feature-test/oeaware/common/common.sh
function pre_test() {
- LOG_INFO "Start to prepare the test environment."
- systemctl restart oeaware
- LOG_INFO "End to prepare the test environment."
+ LOG_INFO "Start to prepare the test environment."
+ if [ "${NODE1_FRAME}" = "x86_64" ];then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ if ! grep -qE 'PARAL|NO_PARAL' /sys/kernel/debug/sched_features;then
+ echo "the environment does not support testing"
+ exit 255
+ fi
+ DNF_INSTALL "oeAware-manager"
+ systemctl restart oeaware
+ LOG_INFO "End to prepare the test environment."
}
function run_test() {
- LOG_INFO "Start to run test."
- oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- cat /sys/kernel/debug/sched_features | grep "PARAL"
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- cat /sys/kernel/debug/sched_features | grep "NO_PARAL"
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ LOG_INFO "Start to run test."
+ oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "NO_PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
- echo PARAL > /sys/kernel/debug/sched_features
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- tail -n 10 /var/log/oeAware/server.log | grep "\[NUMA_SCHED\] numa sched is already enabled on system"
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- cat /sys/kernel/debug/sched_features | grep "PARAL"
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ echo PARAL > /sys/kernel/debug/sched_features
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ tail -n 10 /var/log/oeAware/server.log | grep "\[NUMA_SCHED\] numa sched is already enabled on system"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
- echo NO_PARAL > /sys/kernel/debug/sched_features
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- cat /sys/kernel/debug/sched_features | grep "PARAL"
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- cat /sys/kernel/debug/sched_features | grep "NO_PARAL"
- CHECK_RESULT $? 0 0 "Check disabling numa failed"
- LOG_INFO "End to run test."
+ echo NO_PARAL > /sys/kernel/debug/sched_features
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -e numa_sched_tune | grep "Instance enabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ oeawarectl -d numa_sched_tune | grep "Instance disabled successfully."
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ cat /sys/kernel/debug/sched_features | grep "NO_PARAL"
+ CHECK_RESULT $? 0 0 "Check disabling numa failed"
+ LOG_INFO "End to run test."
}
function post_test() {
- LOG_INFO "Start to restore the test environment."
- LOG_INFO "End to restore the test environment."
+ LOG_INFO "Start to restore the test environment."
+ DNF_REMOVE "$@"
+ LOG_INFO "End to restore the test environment."
}
main "$@"
diff --git a/testcases/feature-test/oeaware/test b/testcases/feature-test/oeaware/test
new file mode 100644
index 000000000..e69de29bb
--
Gitee
From edd4e8a2d335494a009ffe46e19915f45b15b571 Mon Sep 17 00:00:00 2001
From: 13906515865
Date: Thu, 24 Jul 2025 19:36:32 +0800
Subject: [PATCH 4/7] updated
---
suite2cases/oeaware_temp.json | 27 -
suite2cases/oeaware_temp.json_bak | 35 +
temp_txt/suggestion.txt | 18 -
temp_txt/suggestion_new.txt | 18 -
...e_87.sh.swp => .oe_test_oeAware_73.sh.swp} | Bin 12288 -> 12288 bytes
.../feature-test/oeaware/UnixBench/.cproject | 245 +
.../feature-test/oeaware/UnixBench/.project | 82 +
.../feature-test/oeaware/UnixBench/Makefile | 253 +
.../feature-test/oeaware/UnixBench/README | 418 +
testcases/feature-test/oeaware/UnixBench/Run | 1836 +++
.../feature-test/oeaware/UnixBench/USAGE | 394 +
.../oeaware/UnixBench/WRITING_TESTS | 133 +
.../oeaware/UnixBench/pgms/arithoh | Bin 0 -> 72872 bytes
.../oeaware/UnixBench/pgms/context1 | Bin 0 -> 73200 bytes
.../feature-test/oeaware/UnixBench/pgms/dhry2 | Bin 0 -> 73808 bytes
.../oeaware/UnixBench/pgms/dhry2reg | Bin 0 -> 73816 bytes
.../oeaware/UnixBench/pgms/double | Bin 0 -> 72872 bytes
.../feature-test/oeaware/UnixBench/pgms/execl | Bin 0 -> 74784 bytes
.../feature-test/oeaware/UnixBench/pgms/float | Bin 0 -> 72872 bytes
.../oeaware/UnixBench/pgms/fstime | Bin 0 -> 74064 bytes
.../oeaware/UnixBench/pgms/gfx-x11 | 476 +
.../feature-test/oeaware/UnixBench/pgms/hanoi | Bin 0 -> 72888 bytes
.../oeaware/UnixBench/pgms/index.base | 46 +
.../feature-test/oeaware/UnixBench/pgms/int | Bin 0 -> 72872 bytes
.../feature-test/oeaware/UnixBench/pgms/long | Bin 0 -> 72872 bytes
.../oeaware/UnixBench/pgms/looper | Bin 0 -> 73120 bytes
.../oeaware/UnixBench/pgms/multi.sh | 23 +
.../feature-test/oeaware/UnixBench/pgms/pipe | Bin 0 -> 73008 bytes
.../oeaware/UnixBench/pgms/register | Bin 0 -> 72872 bytes
.../feature-test/oeaware/UnixBench/pgms/short | Bin 0 -> 72872 bytes
.../feature-test/oeaware/UnixBench/pgms/spawn | Bin 0 -> 72952 bytes
.../oeaware/UnixBench/pgms/syscall | Bin 0 -> 73256 bytes
.../oeaware/UnixBench/pgms/tst.sh | 20 +
.../oeaware/UnixBench/pgms/unixbench.logo | 14 +
.../oeaware/UnixBench/pgms/whetstone-double | Bin 0 -> 73368 bytes
.../localhost.localdomain-2025-07-24-01 | 8 +
.../localhost.localdomain-2025-07-24-01.html | 51 +
.../oeaware/UnixBench/src/arith.c | 110 +
.../feature-test/oeaware/UnixBench/src/big.c | 597 +
.../oeaware/UnixBench/src/context1.c | 111 +
.../feature-test/oeaware/UnixBench/src/dhry.h | 435 +
.../oeaware/UnixBench/src/dhry_1.c | 431 +
.../oeaware/UnixBench/src/dhry_2.c | 209 +
.../oeaware/UnixBench/src/dummy.c | 319 +
.../oeaware/UnixBench/src/execl.c | 97 +
.../oeaware/UnixBench/src/fstime.c | 469 +
.../oeaware/UnixBench/src/hanoi.c | 77 +
.../oeaware/UnixBench/src/looper.c | 105 +
.../feature-test/oeaware/UnixBench/src/pipe.c | 68 +
.../oeaware/UnixBench/src/spawn.c | 80 +
.../oeaware/UnixBench/src/syscall.c | 109 +
.../oeaware/UnixBench/src/time-polling.c | 573 +
.../oeaware/UnixBench/src/timeit.c | 41 +
.../oeaware/UnixBench/src/ubgears.c | 650 +
.../oeaware/UnixBench/src/whets.c | 1289 ++
.../oeaware/UnixBench/testdir/cctest.c | 156 +
.../oeaware/UnixBench/testdir/dc.dat | 8 +
.../oeaware/UnixBench/testdir/large.txt | 10000 ++++++++++++++++
.../oeaware/UnixBench/testdir/sort.src | 362 +
.../oeaware/UnixBench/tmp/kill_run | 1 +
testcases/feature-test/oeaware/numafast | 0
.../oeaware/oe_test_oeAware_61.sh | 2 +-
.../oeaware/oe_test_oeAware_72.sh | 2 +-
.../oeaware/oe_test_oeAware_73.sh | 3 +-
.../oeaware/oe_test_oeAware_74.sh | 2 +-
.../oeaware/oe_test_oeAware_75.sh | 2 +-
.../oeaware/oe_test_oeAware_76.sh | 2 +-
.../oeaware/oe_test_oeAware_81.sh | 6 +-
.../oeaware/oe_test_oeAware_82.sh | 4 +-
.../oeaware/oe_test_oeAware_83.sh | 4 +-
.../oeaware/oe_test_oeAware_87.sh | 2 +-
testcases/feature-test/oeaware/server.crt | 19 +
testcases/feature-test/oeaware/server.csr | 16 +
testcases/feature-test/oeaware/server.key | 27 +
.../oeaware/smc-tools-main/.gitignore | 5 +
.../oeaware/smc-tools-main/CONTRIBUTING.md | 62 +
.../oeaware/smc-tools-main/LICENSE | 222 +
.../oeaware/smc-tools-main/Makefile | 212 +
.../oeaware/smc-tools-main/README.md | 217 +
.../oeaware/smc-tools-main/af_smc.7 | 147 +
.../feature-test/oeaware/smc-tools-main/dev.c | 545 +
.../feature-test/oeaware/smc-tools-main/dev.h | 25 +
.../oeaware/smc-tools-main/info.c | 199 +
.../oeaware/smc-tools-main/info.h | 21 +
.../oeaware/smc-tools-main/libnetlink.c | 311 +
.../oeaware/smc-tools-main/libnetlink.h | 69 +
.../oeaware/smc-tools-main/linkgroup.c | 636 +
.../oeaware/smc-tools-main/linkgroup.h | 22 +
.../oeaware/smc-tools-main/seid.c | 227 +
.../oeaware/smc-tools-main/seid.h | 21 +
.../oeaware/smc-tools-main/smc-device.8 | 162 +
.../oeaware/smc-tools-main/smc-linkgroup.8 | 216 +
.../oeaware/smc-tools-main/smc-preload.c | 153 +
.../smc-tools-main/smc-tools.autocomplete | 140 +
.../feature-test/oeaware/smc-tools-main/smc.8 | 110 +
.../feature-test/oeaware/smc-tools-main/smc.c | 157 +
.../oeaware/smc-tools-main/smc_chk | 572 +
.../oeaware/smc-tools-main/smc_chk.8 | 115 +
.../oeaware/smc-tools-main/smc_dbg | 113 +
.../oeaware/smc-tools-main/smc_pnet.8 | 177 +
.../oeaware/smc-tools-main/smc_pnet.c | 369 +
.../oeaware/smc-tools-main/smc_rnics | 301 +
.../oeaware/smc-tools-main/smc_rnics.8 | 123 +
.../oeaware/smc-tools-main/smc_run | 85 +
.../oeaware/smc-tools-main/smc_run.8 | 112 +
.../feature-test/oeaware/smc-tools-main/smcd | Bin 0 -> 221344 bytes
.../oeaware/smc-tools-main/smcd-device.8 | 166 +
.../oeaware/smc-tools-main/smcd-info.8 | 71 +
.../oeaware/smc-tools-main/smcd-linkgroup.8 | 222 +
.../oeaware/smc-tools-main/smcd-seid.8 | 59 +
.../oeaware/smc-tools-main/smcd-stats.8 | 201 +
.../oeaware/smc-tools-main/smcd-ueid.8 | 90 +
.../oeaware/smc-tools-main/smcd.8 | 123 +
.../feature-test/oeaware/smc-tools-main/smcr | Bin 0 -> 222608 bytes
.../oeaware/smc-tools-main/smcr.8 | 124 +
.../oeaware/smc-tools-main/smcss.8 | 407 +
.../oeaware/smc-tools-main/smcss.c | 423 +
.../oeaware/smc-tools-main/smctools_common.h | 545 +
.../oeaware/smc-tools-main/stats.c | 1071 ++
.../oeaware/smc-tools-main/stats.h | 128 +
.../oeaware/smc-tools-main/ueid.c | 266 +
.../oeaware/smc-tools-main/ueid.h | 21 +
.../oeaware/smc-tools-main/util.c | 145 +
.../oeaware/smc-tools-main/util.h | 44 +
124 files changed, 30360 insertions(+), 77 deletions(-)
create mode 100644 suite2cases/oeaware_temp.json_bak
delete mode 100644 temp_txt/suggestion.txt
delete mode 100644 temp_txt/suggestion_new.txt
rename testcases/feature-test/oeaware/{.oe_test_oeAware_87.sh.swp => .oe_test_oeAware_73.sh.swp} (75%)
create mode 100644 testcases/feature-test/oeaware/UnixBench/.cproject
create mode 100644 testcases/feature-test/oeaware/UnixBench/.project
create mode 100644 testcases/feature-test/oeaware/UnixBench/Makefile
create mode 100644 testcases/feature-test/oeaware/UnixBench/README
create mode 100755 testcases/feature-test/oeaware/UnixBench/Run
create mode 100644 testcases/feature-test/oeaware/UnixBench/USAGE
create mode 100644 testcases/feature-test/oeaware/UnixBench/WRITING_TESTS
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/arithoh
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/context1
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/dhry2
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/dhry2reg
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/double
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/execl
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/float
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/fstime
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/gfx-x11
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/hanoi
create mode 100644 testcases/feature-test/oeaware/UnixBench/pgms/index.base
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/int
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/long
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/looper
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/multi.sh
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/pipe
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/register
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/short
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/spawn
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/syscall
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/tst.sh
create mode 100644 testcases/feature-test/oeaware/UnixBench/pgms/unixbench.logo
create mode 100755 testcases/feature-test/oeaware/UnixBench/pgms/whetstone-double
create mode 100644 testcases/feature-test/oeaware/UnixBench/results/localhost.localdomain-2025-07-24-01
create mode 100644 testcases/feature-test/oeaware/UnixBench/results/localhost.localdomain-2025-07-24-01.html
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/arith.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/big.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/context1.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/dhry.h
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/dhry_1.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/dhry_2.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/dummy.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/execl.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/fstime.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/hanoi.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/looper.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/pipe.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/spawn.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/syscall.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/time-polling.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/timeit.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/ubgears.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/src/whets.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/testdir/cctest.c
create mode 100644 testcases/feature-test/oeaware/UnixBench/testdir/dc.dat
create mode 100644 testcases/feature-test/oeaware/UnixBench/testdir/large.txt
create mode 100644 testcases/feature-test/oeaware/UnixBench/testdir/sort.src
create mode 100755 testcases/feature-test/oeaware/UnixBench/tmp/kill_run
delete mode 100644 testcases/feature-test/oeaware/numafast
create mode 100644 testcases/feature-test/oeaware/server.crt
create mode 100644 testcases/feature-test/oeaware/server.csr
create mode 100644 testcases/feature-test/oeaware/server.key
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/.gitignore
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/CONTRIBUTING.md
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/LICENSE
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/Makefile
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/README.md
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/af_smc.7
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/dev.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/dev.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/info.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/info.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/libnetlink.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/libnetlink.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/linkgroup.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/linkgroup.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/seid.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/seid.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc-device.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc-linkgroup.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc-preload.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc-tools.autocomplete
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc.c
create mode 100755 testcases/feature-test/oeaware/smc-tools-main/smc_chk
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc_chk.8
create mode 100755 testcases/feature-test/oeaware/smc-tools-main/smc_dbg
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc_pnet.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc_pnet.c
create mode 100755 testcases/feature-test/oeaware/smc-tools-main/smc_rnics
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc_rnics.8
create mode 100755 testcases/feature-test/oeaware/smc-tools-main/smc_run
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smc_run.8
create mode 100755 testcases/feature-test/oeaware/smc-tools-main/smcd
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd-device.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd-info.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd-linkgroup.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd-seid.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd-stats.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd-ueid.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcd.8
create mode 100755 testcases/feature-test/oeaware/smc-tools-main/smcr
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcr.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcss.8
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smcss.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/smctools_common.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/stats.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/stats.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/ueid.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/ueid.h
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/util.c
create mode 100644 testcases/feature-test/oeaware/smc-tools-main/util.h
diff --git a/suite2cases/oeaware_temp.json b/suite2cases/oeaware_temp.json
index f5a88866a..625f6c08d 100644
--- a/suite2cases/oeaware_temp.json
+++ b/suite2cases/oeaware_temp.json
@@ -1,35 +1,8 @@
{
"path": "$OET_PATH/testcases/feature-test/oeaware",
"cases": [
- {
- "name": "oe_test_oeAware_61"
- },
- {
- "name": "oe_test_oeAware_72"
- },
- {
- "name": "oe_test_oeAware_73"
- },
- {
- "name": "oe_test_oeAware_74"
- },
- {
- "name": "oe_test_oeAware_75"
- },
- {
- "name": "oe_test_oeAware_76"
- },
{
"name": "oe_test_oeAware_81"
- },
- {
- "name": "oe_test_oeAware_82"
- },
- {
- "name": "oe_test_oeAware_83"
- },
- {
- "name": "oe_test_oeAware_87"
}
]
}
diff --git a/suite2cases/oeaware_temp.json_bak b/suite2cases/oeaware_temp.json_bak
new file mode 100644
index 000000000..f5a88866a
--- /dev/null
+++ b/suite2cases/oeaware_temp.json_bak
@@ -0,0 +1,35 @@
+{
+ "path": "$OET_PATH/testcases/feature-test/oeaware",
+ "cases": [
+ {
+ "name": "oe_test_oeAware_61"
+ },
+ {
+ "name": "oe_test_oeAware_72"
+ },
+ {
+ "name": "oe_test_oeAware_73"
+ },
+ {
+ "name": "oe_test_oeAware_74"
+ },
+ {
+ "name": "oe_test_oeAware_75"
+ },
+ {
+ "name": "oe_test_oeAware_76"
+ },
+ {
+ "name": "oe_test_oeAware_81"
+ },
+ {
+ "name": "oe_test_oeAware_82"
+ },
+ {
+ "name": "oe_test_oeAware_83"
+ },
+ {
+ "name": "oe_test_oeAware_87"
+ }
+ ]
+}
diff --git a/temp_txt/suggestion.txt b/temp_txt/suggestion.txt
deleted file mode 100644
index e84345fc3..000000000
--- a/temp_txt/suggestion.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-==================================================Analysis Suggestion===================================================
-+---------------------+---------------------+---------------------+
-|suggestion |operation |result |
-+---------------------+---------------------+---------------------+
-|Use numafast |step 1: if `oeaware |Optimizes memory acc |
-| |ctl -q | grep tune_n |ess locality with lo |
-| |uma_mem_access` not |w scheduling overhea |
-| |exist, install numaf |d |
-| |ast | |
-| |install : `oeawarect | |
-| |l -i numafast` | |
-| |load : `oeawarect | |
-| |l -l libtune_numa.so | |
-| |` | |
-| |step 2: enable insta | |
-| |nce `oeaware -e tune | |
-| |_numa_mem_access` | |
-+---------------------+---------------------+---------------------+
diff --git a/temp_txt/suggestion_new.txt b/temp_txt/suggestion_new.txt
deleted file mode 100644
index e84345fc3..000000000
--- a/temp_txt/suggestion_new.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-==================================================Analysis Suggestion===================================================
-+---------------------+---------------------+---------------------+
-|suggestion |operation |result |
-+---------------------+---------------------+---------------------+
-|Use numafast |step 1: if `oeaware |Optimizes memory acc |
-| |ctl -q | grep tune_n |ess locality with lo |
-| |uma_mem_access` not |w scheduling overhea |
-| |exist, install numaf |d |
-| |ast | |
-| |install : `oeawarect | |
-| |l -i numafast` | |
-| |load : `oeawarect | |
-| |l -l libtune_numa.so | |
-| |` | |
-| |step 2: enable insta | |
-| |nce `oeaware -e tune | |
-| |_numa_mem_access` | |
-+---------------------+---------------------+---------------------+
diff --git a/testcases/feature-test/oeaware/.oe_test_oeAware_87.sh.swp b/testcases/feature-test/oeaware/.oe_test_oeAware_73.sh.swp
similarity index 75%
rename from testcases/feature-test/oeaware/.oe_test_oeAware_87.sh.swp
rename to testcases/feature-test/oeaware/.oe_test_oeAware_73.sh.swp
index 875db05b310e8d9f2e01fa05ad32af0160e8cbe1..a3e30fe14bcd47d490f3ece4d55c4f511077f79a 100644
GIT binary patch
delta 885
zcmb`_OKTHR6bJBIq9SQ)TQ{PDJzj@oCS@KqwXG>ZO6{U*rO<~;S|+!X+su&3Ovs%W
zLmE>224OZXT)5DUyO3|-N^z|#7mDJhAWF9`JW1+fmyH*GFn8|b{5dlR*@G^jkbYv*jnHWDB8>LY~
z-lOixSU3Tb&6zPmex4!Z5PdTIekLFoJXBp!?etkcTtQfMZ;w5!R_Kh(0O3#K3}Y|GS`{M+?sHNY5Crs
zlh=Ks$pcn(wtsD7N>#N;U*>fkMa(6?ns
z^aJ^BF16iMGMSVpF^i{mb`n}b2l%@RQoTQE=nK%GgZtlWhwEF+J-#J9XZ-W-Pfya4
z?22xy_OJ9ityUqvw6>XwW9wP90P%XM?05l4R~vB!KWcExDh)0_ct`g)3Wv2P7c)R5
Zxq@GogIZK5q=7z~pEYM^D1=-3%U`HP1lha7KFBL@FTJn>0La92()+D0V8OHd>8$$M&l08M8AR
z8rleP0&(EN6~wt*kT?VVCuo&_pb|nzRN_JpNPPE4;bL?4ZJL%F_((ImXW1eZP0(^c{Bn1IA*dOnQ4%i75EP-zyro6A7^a#2^f$6
z|L^|&f9F}oega>B>);cx0p`H3#~Aw&+ydOsssxly>C`Q$miUpY-z-JP2GmMq&3u($u_RLa!*;}iwHZD~2q3$22X?Se@z(}1$
z`X*G82B<#Eip5@fnXW$Eu0&?`6gDiLuP(j;E1K)I7EQfI)A(@~d&0j=L8y5<3T2mM
z(&yCSVI+dwa-SP=^_F%oi&%=t4MaQXy4vrFz$?nwG+m=^EC%G%8{V6hwMs32^wB0P
z%HezyQ7Opm@$%VBBE{ZJD^eRSeNmV$0@8`^3#~gz6m9);D<11|#pvY<2}g}{wZiWy
z+VvZq9qDVlcc49*YyVU%jaAtL2_%DUL?uZx)8uaQ*o{;dMV?u4r>jN0DPmNRcK1U1
z(#87vYQ<|p+oekz`A*CtpF&MMiUkk0aJ1z9$02}CX+(cqTkXJNFZ1)iDe!hT!?=_^
zkk~u8t@hUfX_#DxNpYCQ_7Y&6$kF+4fSiOv>fg)D`5sDhO`62hEL^0Y?37Eh_Da3k
zs?=)a6g#)n=aP3toJU6|%)Xc&iQ-CV=)~OI@J4#FI_XQnJ?{e)D4|IzgM2j@s5I?k
zVr1TYys$$Pc`hxzU8UpM-nh0KkjF|26RcE~AwaRLB+gWg~
z%%Ad3opd&a^X}un5pLM?DZY1)Pqj*7q<75OsJ8G{(K^5FjH;mbRll#Kn|E39{N#k2
O=-6$C(rt58)Bgr()^7a(
diff --git a/testcases/feature-test/oeaware/UnixBench/.cproject b/testcases/feature-test/oeaware/UnixBench/.cproject
new file mode 100644
index 000000000..90ad35054
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/.cproject
@@ -0,0 +1,245 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/testcases/feature-test/oeaware/UnixBench/.project b/testcases/feature-test/oeaware/UnixBench/.project
new file mode 100644
index 000000000..7945d2b1d
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/.project
@@ -0,0 +1,82 @@
+
+
+ UnixBench
+
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.genmakebuilder
+ clean,full,incremental,
+
+
+ ?name?
+
+
+
+ org.eclipse.cdt.make.core.append_environment
+ true
+
+
+ org.eclipse.cdt.make.core.autoBuildTarget
+ all
+
+
+ org.eclipse.cdt.make.core.buildArguments
+
+
+
+ org.eclipse.cdt.make.core.buildCommand
+ make
+
+
+ org.eclipse.cdt.make.core.buildLocation
+ ${workspace_loc:/UnixBench/Debug}
+
+
+ org.eclipse.cdt.make.core.cleanBuildTarget
+ clean
+
+
+ org.eclipse.cdt.make.core.contents
+ org.eclipse.cdt.make.core.activeConfigSettings
+
+
+ org.eclipse.cdt.make.core.enableAutoBuild
+ false
+
+
+ org.eclipse.cdt.make.core.enableCleanBuild
+ true
+
+
+ org.eclipse.cdt.make.core.enableFullBuild
+ true
+
+
+ org.eclipse.cdt.make.core.fullBuildTarget
+ all
+
+
+ org.eclipse.cdt.make.core.stopOnError
+ true
+
+
+ org.eclipse.cdt.make.core.useDefaultBuildCmd
+ true
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
+
+
+
+
+
+ org.eclipse.cdt.core.cnature
+ org.eclipse.cdt.core.ccnature
+ org.eclipse.cdt.managedbuilder.core.managedBuildNature
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
+
+
diff --git a/testcases/feature-test/oeaware/UnixBench/Makefile b/testcases/feature-test/oeaware/UnixBench/Makefile
new file mode 100644
index 000000000..62f36e10b
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/Makefile
@@ -0,0 +1,253 @@
+##############################################################################
+# UnixBench v5.1.3
+# Based on The BYTE UNIX Benchmarks - Release 3
+# Module: Makefile SID: 3.9 5/15/91 19:30:15
+#
+##############################################################################
+# Bug reports, patches, comments, suggestions should be sent to:
+# David C Niemi
+#
+# Original Contacts at Byte Magazine:
+# Ben Smith or Tom Yager at BYTE Magazine
+# bensmith@bytepb.byte.com tyager@bytepb.byte.com
+#
+##############################################################################
+# Modification Log: 7/28/89 cleaned out workload files
+# 4/17/90 added routines for installing from shar mess
+# 7/23/90 added compile for dhrystone version 2.1
+# (this is not part of Run file. still use old)
+# removed HZ from everything but dhry.
+# HZ is read from the environment, if not
+# there, you must define it in this file
+# 10/30/90 moved new dhrystone into standard set
+# new pgms (dhry included) run for a specified
+# time rather than specified number of loops
+# 4/5/91 cleaned out files not needed for
+# release 3 -- added release 3 files -ben
+# 10/22/97 added compiler options for strict ANSI C
+# checking for gcc and DEC's cc on
+# Digital Unix 4.x (kahn@zk3.dec.com)
+# 09/26/07 changes for UnixBench 5.0
+# 09/30/07 adding ubgears, GRAPHIC_TESTS switch
+# 10/14/07 adding large.txt
+# 01/13/11 added support for parallel compilation
+##############################################################################
+
+##############################################################################
+# CONFIGURATION
+##############################################################################
+
+SHELL = /bin/sh
+
+# GRAPHICS TESTS: Uncomment the definition of "GRAPHIC_TESTS" to enable
+# the building of the graphics benchmarks. This will require the
+# X11 libraries on your system.
+#
+# Comment the line out to disable these tests.
+# GRAPHIC_TESTS = defined
+
+# Set "GL_LIBS" to the libraries needed to link a GL program.
+GL_LIBS = -lGL -lXext -lX11
+
+
+# COMPILER CONFIGURATION: Set "CC" to the name of the compiler to use
+# to build the binary benchmarks. You should also set "$cCompiler" in the
+# Run script to the name of the compiler you want to test.
+CC=gcc
+
+# OPTIMISATION SETTINGS:
+
+## Very generic
+#OPTON = -O
+
+## For Linux 486/Pentium, GCC 2.7.x and 2.8.x
+#OPTON = -O2 -fomit-frame-pointer -fforce-addr -fforce-mem -ffast-math \
+# -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2
+
+## For Linux, GCC previous to 2.7.0
+#OPTON = -O2 -fomit-frame-pointer -fforce-addr -fforce-mem -ffast-math -m486
+
+#OPTON = -O2 -fomit-frame-pointer -fforce-addr -fforce-mem -ffast-math \
+# -m386 -malign-loops=1 -malign-jumps=1 -malign-functions=1
+
+## For Solaris 2, or general-purpose GCC 2.7.x
+OPTON = -O2 -fomit-frame-pointer -fforce-addr -ffast-math -Wall
+
+## For Digital Unix v4.x, with DEC cc v5.x
+#OPTON = -O4
+#CFLAGS = -DTIME -std1 -verbose -w0
+
+## generic gcc CFLAGS. -DTIME must be included.
+CFLAGS = -DTIME -Wall -pedantic -ansi
+
+
+##############################################################################
+# END CONFIGURATION
+##############################################################################
+
+
+# local directories
+PROGDIR = ./pgms
+SRCDIR = ./src
+TESTDIR = ./testdir
+RESULTDIR = ./results
+TMPDIR = ./tmp
+# other directories
+INCLDIR = /usr/include
+LIBDIR = /lib
+SCRIPTS = unixbench.logo multi.sh tst.sh index.base
+SOURCES = arith.c big.c context1.c \
+ dummy.c execl.c \
+ fstime.c hanoi.c \
+ pipe.c spawn.c \
+ syscall.c looper.c timeit.c time-polling.c \
+ dhry_1.c dhry_2.c dhry.h whets.c ubgears.c
+TESTS = sort.src cctest.c dc.dat large.txt
+
+ifdef GRAPHIC_TESTS
+GRAPHIC_BINS = $(PROGDIR)/ubgears
+else
+GRAPHIC_BINS =
+endif
+
+# Program binaries.
+BINS = $(PROGDIR)/arithoh $(PROGDIR)/register $(PROGDIR)/short \
+ $(PROGDIR)/int $(PROGDIR)/long $(PROGDIR)/float $(PROGDIR)/double \
+ $(PROGDIR)/hanoi $(PROGDIR)/syscall $(PROGDIR)/context1 \
+ $(PROGDIR)/pipe $(PROGDIR)/spawn $(PROGDIR)/execl \
+ $(PROGDIR)/dhry2 $(PROGDIR)/dhry2reg $(PROGDIR)/looper \
+ $(PROGDIR)/fstime $(PROGDIR)/whetstone-double $(GRAPHIC_BINS)
+## These compile only on some platforms...
+# $(PROGDIR)/poll $(PROGDIR)/poll2 $(PROGDIR)/select
+
+# Required non-binary files.
+REQD = $(BINS) $(PROGDIR)/unixbench.logo \
+ $(PROGDIR)/multi.sh $(PROGDIR)/tst.sh $(PROGDIR)/index.base \
+ $(PROGDIR)/gfx-x11 \
+ $(TESTDIR)/sort.src $(TESTDIR)/cctest.c $(TESTDIR)/dc.dat \
+ $(TESTDIR)/large.txt
+
+# ######################### the big ALL ############################
+all: distr programs
+## Ick!!! What is this about??? How about let's not chmod everything bogusly.
+# @chmod 744 * $(SRCDIR)/* $(PROGDIR)/* $(TESTDIR)/* $(DOCDIR)/*
+
+# ####################### a check for Run ######################
+check: $(REQD)
+ make all
+# ##############################################################
+# distribute the files out to subdirectories if they are in this one
+distr:
+ @echo "Checking distribution of files"
+# scripts
+ @if test ! -d $(PROGDIR) \
+ ; then \
+ mkdir $(PROGDIR) \
+ ; mv $(SCRIPTS) $(PROGDIR) \
+ ; else \
+ echo "$(PROGDIR) exists" \
+ ; fi
+# C sources
+ @if test ! -d $(SRCDIR) \
+ ; then \
+ mkdir $(SRCDIR) \
+ ; mv $(SOURCES) $(SRCDIR) \
+ ; else \
+ echo "$(SRCDIR) exists" \
+ ; fi
+# test data
+ @if test ! -d $(TESTDIR) \
+ ; then \
+ mkdir $(TESTDIR) \
+ ; mv $(TESTS) $(TESTDIR) \
+ ; else \
+ echo "$(TESTDIR) exists" \
+ ; fi
+# temporary work directory
+ @if test ! -d $(TMPDIR) \
+ ; then \
+ mkdir $(TMPDIR) \
+ ; else \
+ echo "$(TMPDIR) exists" \
+ ; fi
+# directory for results
+ @if test ! -d $(RESULTDIR) \
+ ; then \
+ mkdir $(RESULTDIR) \
+ ; else \
+ echo "$(RESULTDIR) exists" \
+ ; fi
+
+programs: $(BINS)
+
+# Individual programs
+$(PROGDIR)/arithoh: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/arithoh ${CFLAGS} ${OPTON} -Darithoh $(SRCDIR)/arith.c
+$(PROGDIR)/register: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/register ${CFLAGS} ${OPTON} -Ddatum='register int' $(SRCDIR)/arith.c
+$(PROGDIR)/short: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/short ${CFLAGS} ${OPTON} -Ddatum=short $(SRCDIR)/arith.c
+$(PROGDIR)/int: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/int ${CFLAGS} ${OPTON} -Ddatum=int $(SRCDIR)/arith.c
+$(PROGDIR)/long: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/long ${CFLAGS} ${OPTON} -Ddatum=long $(SRCDIR)/arith.c
+$(PROGDIR)/float: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/float ${CFLAGS} ${OPTON} -Ddatum=float $(SRCDIR)/arith.c
+$(PROGDIR)/double: $(SRCDIR)/arith.c
+ $(CC) -o $(PROGDIR)/double ${CFLAGS} ${OPTON} -Ddatum=double $(SRCDIR)/arith.c
+$(PROGDIR)/whetstone-double: $(SRCDIR)/whets.c
+ $(CC) -o $(PROGDIR)/whetstone-double ${CFLAGS} ${OPTON} -DDP -DUNIX -DUNIXBENCH $(SRCDIR)/whets.c -lm
+$(PROGDIR)/hanoi: $(SRCDIR)/hanoi.c
+ $(CC) -o $(PROGDIR)/hanoi ${CFLAGS} ${OPTON} $(SRCDIR)/hanoi.c
+
+$(PROGDIR)/poll: $(SRCDIR)/time-polling.c
+ $(CC) -DHAS_POLL -DUNIXBENCH -o $(PROGDIR)/poll ${CFLAGS} ${OPTON} $(SRCDIR)/time-polling.c
+
+$(PROGDIR)/poll2: $(SRCDIR)/time-polling.c
+ $(CC) -DHAS_POLL2 -DUNIXBENCH -o $(PROGDIR)/poll2 ${CFLAGS} ${OPTON} $(SRCDIR)/time-polling.c
+
+$(PROGDIR)/select: $(SRCDIR)/time-polling.c
+ $(CC) -DHAS_SELECT -DUNIXBENCH -o $(PROGDIR)/select ${CFLAGS} ${OPTON} $(SRCDIR)/time-polling.c
+
+$(PROGDIR)/fstime: $(SRCDIR)/fstime.c
+ $(CC) -o $(PROGDIR)/fstime ${CFLAGS} ${OPTON} $(SRCDIR)/fstime.c
+
+$(PROGDIR)/syscall: $(SRCDIR)/syscall.c
+ $(CC) -o $(PROGDIR)/syscall ${CFLAGS} ${OPTON} $(SRCDIR)/syscall.c
+$(PROGDIR)/context1: $(SRCDIR)/context1.c
+ $(CC) -o $(PROGDIR)/context1 ${CFLAGS} ${OPTON} $(SRCDIR)/context1.c
+$(PROGDIR)/pipe: $(SRCDIR)/pipe.c
+ $(CC) -o $(PROGDIR)/pipe ${CFLAGS} ${OPTON} $(SRCDIR)/pipe.c
+$(PROGDIR)/spawn: $(SRCDIR)/spawn.c
+ $(CC) -o $(PROGDIR)/spawn ${CFLAGS} ${OPTON} $(SRCDIR)/spawn.c
+$(PROGDIR)/execl: $(SRCDIR)/execl.c $(SRCDIR)/big.c
+ $(CC) -o $(PROGDIR)/execl ${CFLAGS} ${OPTON} $(SRCDIR)/execl.c
+
+$(PROGDIR)/dhry2: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c $(SRCDIR)/dhry.h
+ cd $(SRCDIR); $(CC) -c ${CFLAGS} -DHZ=${HZ} ${OPTON} dhry_1.c
+ cd $(SRCDIR); $(CC) -c ${CFLAGS} -DHZ=${HZ} ${OPTON} dhry_2.c
+ $(CC) -o $(PROGDIR)/dhry2 ${CFLAGS} ${OPTON} $(SRCDIR)/dhry_1.o $(SRCDIR)/dhry_2.o
+ cd $(SRCDIR); rm -f dhry_1.o dhry_2.o
+$(PROGDIR)/dhry2reg: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c $(SRCDIR)/dhry.h
+ cd $(SRCDIR); $(CC) -c ${CFLAGS} -DREG=register -DHZ=${HZ} ${OPTON} dhry_1.c -o dhry_1_reg.o
+ cd $(SRCDIR); $(CC) -c ${CFLAGS} -DREG=register -DHZ=${HZ} ${OPTON} dhry_2.c -o dhry_2_reg.o
+ $(CC) -o $(PROGDIR)/dhry2reg ${CFLAGS} ${OPTON} $(SRCDIR)/dhry_1_reg.o $(SRCDIR)/dhry_2_reg.o
+ cd $(SRCDIR); rm -f dhry_1_reg.o dhry_2_reg.o
+
+$(PROGDIR)/looper: $(SRCDIR)/looper.c
+ $(CC) -o $(PROGDIR)/looper ${CFLAGS} ${OPTON} $(SRCDIR)/looper.c
+
+$(PROGDIR)/ubgears: $(SRCDIR)/ubgears.c
+ $(CC) -o $(PROGDIR)/ubgears ${CFLAGS} ${OPTON} $(SRCDIR)/ubgears.c $(GL_LIBS)
+
+# Run the benchmarks and create the reports
+run:
+ sh ./Run
+
+clean:
+ rm -f $(BINS) core *~ */*~
+
+spotless: clean
+ rm -f $(RESULTDIR)/* $(TMPDIR)/*
+
+## END ##
diff --git a/testcases/feature-test/oeaware/UnixBench/README b/testcases/feature-test/oeaware/UnixBench/README
new file mode 100644
index 000000000..67cad92f5
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/README
@@ -0,0 +1,418 @@
+Version 5.1.3 -- 2011-01-13
+
+================================================================
+To use Unixbench:
+
+1. UnixBench from version 5.1 on has both system and graphics tests.
+ If you want to use the graphic tests, edit the Makefile and make sure
+ that the line "GRAPHIC_TESTS = defined" is not commented out; then check
+ that the "GL_LIBS" definition is OK for your system. Also make sure
+ that the "x11perf" command is on your search path.
+
+ If you don't want the graphics tests, then comment out the
+ "GRAPHIC_TESTS = defined" line. Note: comment it out, don't
+ set it to anything.
+
+2. Do "make".
+
+3. Do "Run" to run the system test; "Run graphics" to run the graphics
+ tests; "Run gindex" to run both.
+
+You will need perl, as Run is written in perl.
+
+For more information on using the tests, read "USAGE".
+
+For information on adding tests into the benchmark, see "WRITING_TESTS".
+
+
+===================== RELEASE NOTES =====================================
+
+======================== Jan 13 ==========================
+
+v5.1.3
+
+Fixed issue that would cause a race condition if you attempted to compile in
+parallel with more than 3 parallel jobs.
+
+
+Kelly Lucas, Jan 13, 2011
+kdlucas at gmail period com
+
+
+======================== Dec 07 ==========================
+
+v5.1.2
+
+One big fix: if unixbench is installed in a directory whose pathname contains
+a space, it should now run (previously it failed).
+
+To avoid possible clashes, the environment variables unixbench uses are now
+prefixed with "UB_". These are all optional, and for most people will be
+completely unnecessary, but if you want you can set these:
+
+ UB_BINDIR Directory where the test programs live.
+ UB_TMPDIR Temp directory, for temp files.
+ UB_RESULTDIR Directory to put results in.
+ UB_TESTDIR Directory where the tests are executed.
+
+And a couple of tiny fixes:
+* In pgms/tst.sh, changed "sort -n +1" to "sort -n -k 1"
+* In Makefile, made it clearer that GRAPHIC_TESTS should be commented
+ out (not set to 0) to disable graphics
+Thanks to nordi for pointing these out.
+
+
+Ian Smith, December 26, 2007
+johantheghost at yahoo period com
+
+
+======================== Oct 07 ==========================
+
+v5.1.1
+
+It turns out that the setting of LANG is crucial to the results. This
+explains why people in different regions were seeing odd results, and also
+why runlevel 1 produced odd results -- runlevel 1 doesn't set LANG, and
+hence reverts to ASCII, whereas most people use a UTF-8 encoding, which is
+much slower in some tests (eg. shell tests).
+
+So now we manually set LANG to "en_US.utf8", which is configured with the
+variable "$language". Don't change this if you want to share your results.
+We also report the language settings in use.
+
+See "The Language Setting" in USAGE for more info. Thanks to nordi for
+pointing out the LANG issue.
+
+I also added the "grep" and "sysexec" tests. These are non-index tests,
+and "grep" uses the system's grep, so it's not much use for comparing
+different systems. But some folks on the OpenSuSE list have been finding
+these useful. They aren't in any of the main test groups; do "Run grep
+sysexec" to run them.
+
+Index Changes
+-------------
+
+The setting of LANG will affect consistency with systems where this is
+not the default value. However, it should produce more consistent results
+in future.
+
+
+Ian Smith, October 15, 2007
+johantheghost at yahoo period com
+
+
+======================== Oct 07 ==========================
+
+v5.1
+
+The major new feature in this version is the addition of graphical
+benchmarks. Since these may not compile on all systems, you can enable/
+disable them with the GRAPHIC_TESTS variable in the Makefile.
+
+As before, each test is run for 3 or 10 iterations. However, we now discard
+the worst 1/3 of the scores before averaging the remainder. The logic is
+that a glitch in the system (background process waking up, for example) may
+make one or two runs go slow, so let's discard those. Hopefully this will
+produce more consistent and repeatable results. Check the log file
+for a test run to see the discarded scores.
+
+Made the tests compile and run on x86-64/Linux (fixed an execl bug passing
+int instead of pointer).
+
+Also fixed some general bugs.
+
+Thanks to Stefan Esser for help and testing / bug reporting.
+
+Index Changes
+-------------
+
+The tests are now divided into categories, and each category generates
+its own index. This keeps the graphics test results separate from
+the system tests.
+
+The "graphics" test and corresponding index are new.
+
+The "discard the worst scores" strategy should produce slightly higher
+test scores, but at least they should (hopefully!) be more consistent.
+The scores should not be higher than the best scores you would have got
+with 5.0, so this should not be a huge consistency issue.
+
+Ian Smith, October 11, 2007
+johantheghost at yahoo period com
+
+
+======================== Sep 07 ==========================
+
+v5.0
+
+All the work I've done on this release is Linux-based, because that's
+the only Unix I have access to. I've tried to make it more OS-agnostic
+if anything; for example, it no longer has to figure out the format reported
+by /usr/bin/time. However, it's possible that portability has been damaged.
+If anyone wants to fix this, please feel free to mail me patches.
+
+In particular, the analysis of the system's CPUs is done via /proc/cpuinfo.
+For systems which don't have this, please make appropriate changes in
+getCpuInfo() and getSystemInfo().
+
+The big change has been to make the tests multi-CPU aware. See the
+"Multiple CPUs" section in "USAGE" for details. Other changes:
+
+* Completely rewrote Run in Perl; drastically simplified the way data is
+ processed. The confusing system of interlocking shell and awk scripts is
+ now just one script. Various intermediate files used to store and process
+ results are now replaced by Perl data structures internal to the script.
+
+* Removed from the index runs file system read and write tests which were
+ ignored for the index and wasted about 10 minutes per run (see fstime.c).
+ The read and write tests can now be selected individually. Made fstime.c
+ take parameters, so we no longer need to build 3 versions of it.
+
+* Made the output file names unique; they are built from
+ hostname-date-sequence.
+
+* Worked on result reporting, error handling, and logging. See TESTS.
+ We now generate both text and HTML reports.
+
+* Removed some obsolete files.
+
+Index Changes
+-------------
+
+The index is still based on David Niemi's SPARCstation 20-61 (rated at 10.0),
+and the intention in the changes I've made has been to keep the tests
+unchanged, in order to maintain consistency with old result sets.
+
+However, the following changes have been made to the index:
+
+* The Pipe-based Context Switching test (context1) was being dropped
+ from the index report in v4.1.0 due to a bug; I've put it back in.
+
+* I've added shell1 to the index, to get a measure of how the shell tests
+ scale with multiple CPUs (shell8 already exercises all the CPUs, even
+ in single-copy mode). I made up the baseline score for this by
+ extrapolation.
+
+Both of these tests can be dropped, if you wish, by editing the "TEST
+SPECIFICATIONS" section of Run.
+
+Ian Smith, September 20, 2007
+johantheghost at yahoo period com
+
+======================== Aug 97 ==========================
+
+v4.1.0
+
+Double precision Whetstone put in place instead of the old "double" benchmark.
+
+Removal of some obsolete files.
+
+"system" suite adds shell8.
+
+perlbench and poll added as "exhibition" (non-index) benchmarks.
+
+Incorporates several suggestions by Andre Derrick Balsa
+
+Code cleanups to reduce compiler warnings by David C Niemi
+and Andy Kahn ; Digital Unix options by Andy Kahn.
+
+======================== Jun 97 ==========================
+
+v4.0.1
+
+Minor change to fstime.c to fix overflow problems on fast machines. Counting
+is now done in units of 256 (smallest BUFSIZE) and unsigned longs are used,
+giving another 23 dB or so of headroom ;^) Results should be virtually
+identical aside from very small rounding errors.
+
+======================== Dec 95 ==========================
+
+v4.0
+
+Byte no longer seems to have anything to do with this benchmark, and I was
+unable to reach any of the original authors; so I have taken it upon myself
+to clean it up.
+
+This is version 4. Major assumptions made in these benchmarks have changed
+since they were written, but they are nonetheless popular (particularly for
+measuring hardware for Linux). Some changes made:
+
+- The biggest change is to put a lot more operating system-oriented
+ tests into the index. I experimented for a while with a decibel-like
+ logarithmic scale, but finally settled on using a geometric mean for
+ the final index (the individual scores are a normalized, and their
+ logs are averaged; the resulting value is exponentiated).
+
+ "George", certain SPARCstation 20-61 with 128 MB RAM, a SPARC Storage
+ Array, and Solaris 2.3 is my new baseline; it is rated at 10.0 in each
+ of the index scores for a final score of 10.0.
+
+ Overall I find the geometric averaging is a big improvement for
+ avoiding the skew that was once possible (e.g. a Pentium-75 which got
+ 40 on the buggy version of fstime, such that fstime accounted for over
+ half of its total score and hence wildly skewed its average).
+
+ I also expect that the new numbers look different enough from the old
+ ones that no one is too likely to casually mistake them for each other.
+
+ I am finding new SPARCs running Solaris 2.4 getting about 15-20, and
+ my 486 DX2-66 Compaq running Linux 1.3.45 got a 9.1. It got
+ understandably poor scores on CPU and FPU benchmarks (a horrible
+ 1.8 on "double" and 1.3 on "fsdisk"); but made up for it by averaging
+ over 20 on the OS-oriented benchmarks. The Pentium-75 running
+ Linux gets about 20 (and it *still* runs Windows 3.1 slowly. Oh well).
+
+- It is difficult to get a modern compiler to even consider making
+ dhry2 without registers, short of turning off *all* optimizations.
+ This is also not a terribly meaningful test, even if it were possible,
+ as no one compiles without registers nowadays. Replaced this benchmark
+ with dhry2reg in the index, and dropped it out of usage in general as
+ it is so hard to make a legitimate one.
+
+- fstime: this had some bugs when compiled on modern systems which return
+ the number of bytes read/written for read(2)/write(2) calls. The code
+ assumed that a negative return code was given for EOF, but most modern
+ systems return 0 (certainly on SunOS 4, Solaris2, and Linux, which is
+ what counts for me). The old code yielded wildly inflated read scores,
+ would eat up tens of MB of disk space on fast systems, and yielded
+ roughly 50% lower than normal copy scores than it should have.
+
+ Also, it counted partial blocks *fully*; made it count the proportional
+ part of the block which was actually finished.
+
+ Made bigger and smaller variants of fstime which are designed to beat
+ up the disk I/O and the buffer cache, respectively. Adjusted the
+ sleeps so that they are short for short benchmarks.
+
+- Instead of 1,2,4, and 8-shell benchmarks, went to 1, 8, and 16 to
+ give a broader range of information (and to run 1 fewer test).
+ The only real problem with this is that not many iterations get
+ done with 16 at a time on slow systems, so there are some significant
+ rounding errors; 8 therefore still used for the benchmark. There is
+ also the problem that the last (uncompleted) loop is counted as a full
+ loop, so it is impossible to score below 1.0 lpm (which gave my laptop
+ a break). Probably redesigning Shell to do each loop a bit more
+ quickly (but with less intensity) would be a good idea.
+
+ This benchmark appears to be very heavily influenced by the speed
+ of the loader, by which shell is being used as /bin/sh, and by how
+ well-compiled some of the common shell utilities like grep, sed, and
+ sort are. With a consistent tool set it is also a good indicator of
+ the bandwidth between main memory and the CPU (e.g. Pentia score about
+ twice as high as 486es due to their 64-bit bus). Small, sometimes
+ broken shells like "ash-linux" do particularly well here, while big,
+ robust shells like bash do not.
+
+- "dc" is a somewhat iffy benchmark, because there are two versions of
+ it floating around, one being small, very fast, and buggy, and one
+ being more correct but slow. It was never in the index anyway.
+
+- Execl is a somewhat troubling benchmark in that it yields much higher
+ scores if compiled statically. I frown on this practice because it
+ distorts the scores away from reflecting how programs are really used
+ (i.e. dynamically linked).
+
+- Arithoh is really more an indicator of the compiler quality than of
+ the computer itself. For example, GCC 2.7.x with -O2 and a few extra
+ options optimizes much of it away, resulting in about a 1200% boost
+ to the score. Clearly not a good one for the index.
+
+I am still a bit unhappy with the variance in some of the benchmarks, most
+notably the fstime suite; and with how long it takes to run. But I think
+it gets significantly more reliable results than the older version in less
+time.
+
+If anyone has ideas on how to make these benchmarks faster, lower-variance,
+or more meaningful; or has nice, new, portable benchmarks to add, don't
+hesitate to e-mail me.
+
+David C Niemi 7 Dec 1995
+
+======================== May 91 ==========================
+This is version 3. This set of programs should be able to determine if
+your system is BSD or SysV. (It uses the output format of time (1)
+to see.) If you have any problems, contact me (by email,
+preferably): ben@bytepb.byte.com
+
+---
+
+The document doc/bench.doc describes the basic flow of the
+benchmark system. The document doc/bench3.doc describes the major
+changes in design of this version. As a user of the benchmarks,
+you should understand some of the methods that have been
+implemented to generate loop counts:
+
+Tests that are compiled C code:
+ The function wake_me(second, func) is included (from the file
+timeit.c). This function uses signal and alarm to set a countdown
+for the time request by the benchmark administration script
+(Run). As soon as the clock is started, the test is run with a
+counter keeping track of the number of loops that the test makes.
+When alarm sends its signal, the loop counter value is sent to stderr
+and the program terminates. Since the time resolution, signal
+trapping and other factors don't insure that the test is for the
+precise time that was requested, the test program is also run
+from the time (1) command. The real time value returned from time
+(1) is what is used in calculating the number of loops per second
+(or minute, depending on the test). As is obvious, there is some
+overhead time that is not taken into account, therefore the
+number of loops per second is not absolute. The overhead of the
+test starting and stopping and the signal and alarm calls is
+common to the overhead of real applications. If a program loads
+quickly, the number of loops per second increases; a phenomenon
+that favors systems that can load programs quickly. (Setting the
+sticky bit of the test programs is not considered fair play.)
+
+Tests that use existing UNIX programs or shell scripts:
+ The concept is the same as that of compiled tests, except the
+alarm and signal are contained in separate compiled program,
+looper (source is looper.c). Looper uses an execvp to invoke the
+test with its arguments. Here, the overhead includes the
+invocation and execution of looper.
+
+--
+
+The index numbers are generated from a baseline file that is in
+pgms/index.base. You can put tests that you wish in this file.
+All you need to do is take the results/log file from your
+baseline machine, edit out the comment and blank lines, and sort
+the result (vi/ex command: 1,$!sort). The sort is necessary
+because the process of generating the index report uses join (1).
+You can regenerate the reports by running "make report."
+
+--
+
+========================= Jan 90 =============================
+Tom Yager has joined the effort here at BYTE; he is responsible
+for many refinements in the UNIX benchmarks.
+
+The memory access tests have been deleted from the benchmarks.
+The file access tests have been reversed so that the test is run
+for a fixed time. The amount of data transferred (written, read,
+and copied) is the variable. !WARNING! This test can eat up a
+large hunk of disk space.
+
+The initial line of all shell scripts has been changed from the
+SCO and XENIX form (:) to the more standard form "#! /bin/sh".
+But different systems handle shell switching differently. Check
+the documentation on your system and find out how you are
+supposed to do it. Or, simpler yet, just run the benchmarks from
+the Bourne shell. (You may need to set SHELL=/bin/sh as well.)
+
+The options to Run have not been checked in a while. They may no
+longer function. Next time, I'll get back on them. There needs to
+be another option added (next time) that halts testing between
+each test. !WARNING! Some systems have caches that are not getting flushed
+before the next test or iteration is run. This can cause
+erroneous values.
+
+========================= Sept 89 =============================
+The database (db) programs now have a tuneable message queue space.
+The default set in the Run script is 1024 bytes.
+Other major changes are in the format of the times. We now show
+Arithmetic and Geometric mean and standard deviation for User
+Time, System Time, and Real Time. Generally, in reporting, we
+plan on using the Real Time values with the benchs run with one
+active user (the bench user). Comments and arguments are requested.
+
+contact: BIX bensmith or rick_g
diff --git a/testcases/feature-test/oeaware/UnixBench/Run b/testcases/feature-test/oeaware/UnixBench/Run
new file mode 100755
index 000000000..d6159355e
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/Run
@@ -0,0 +1,1836 @@
+#!/usr/bin/perl -w
+
+use strict;
+
+use POSIX qw(strftime);
+use Time::HiRes;
+use IO::Handle;
+
+
+############################################################################
+# UnixBench - Release 5.1.3, based on:
+# The BYTE UNIX Benchmarks - Release 3
+# Module: Run SID: 3.11 5/15/91 19:30:14
+# Original Byte benchmarks written by:
+# Ben Smith, Tom Yager at BYTE Magazine
+# ben@bytepb.byte.com tyager@bytepb.byte.com
+# BIX: bensmith tyager
+#
+#######################################################################
+# General Purpose Benchmark
+# based on the work by Ken McDonell, Computer Science, Monash University
+#
+# You will need ...
+# perl Time::HiRes IO::Handle cat cc chmod comm cp date dc df echo
+# kill ls make mkdir rm sed test time touch tty umask who
+###############################################################################
+# Modification Log:
+# $Header: run,v 5.2 88/01/12 06:23:43 kenj Exp $
+# Ken McDonell, Computer Science, Monash University
+# August 1, 1983
+# 3/89 - Ben Smith - BYTE: globalized many variables, modernized syntax
+# 5/89 - commented and modernized. Removed workload items till they
+# have been modernized. Added database server test.
+# 11/14/89 - Made modifications to reflect new version of fstime
+# and elimination of mem tests.
+# 10/22/90 - Many tests have been flipped so that they run for
+# a specified length of time and loops are counted.
+# 4/3/91 - Cleaned up and debugged several test parameters - Ben
+# 4/9/91 - Added structure for creating index and determining flavor of UNIX
+# 4/26/91 - Made changes and corrections suggested by Tin Le of Sony
+# 5/15/91 - Removed db from distribution
+# 4/4/92 Jon Tombs fixed for GNU time to look like
+# BSD (don't know the format of sysV!)
+# 12/95 - Massive changes for portability, speed, and more meaningful index
+# DCN David C Niemi
+# 1997.06.20 DCN Fixed overflow condition in fstime.c on fast machines
+# 1997.08.24 DCN Modified "system", replaced double with
+# whetstone-double in "index"
+# 1997.09.10 DCN Added perlbench as an Exhibition benchmark
+# 1997.09.23 DCN Added rgooch's select as an Exhibition benchmark
+# 1999.07.28 DCN "select" not compiled or run by default, because it
+# does not compile on many platforms. PerlBench also
+# not run by default.
+# 2007.09.26 IS Huge rewrite -- see release notes in README.
+# 2007.10.12 IS Added graphics tests, categories feature.
+# 2007.10.14 IS Set and report LANG. Added "grep" and "sysexec".
+# 2007.12.22 IS Tiny fixes; see README.
+# 2011.01.13 KDL Fix for parallel compilation.
+
+
+############################################################################
+# CONFIGURATION
+############################################################################
+
+# Version number of the script.
+my $version = "5.1.3";
+
+# The setting of LANG makes a huge difference to some of the scores,
+# particularly depending on whether UTF-8 is used. So we always set
+# it to the same value, which is configured here.
+#
+# If you want your results to be meaningful when compared to other peoples'
+# results, you should not change this. Change it if you want to measure the
+# effect of different languages.
+my $language = "en_US.utf8";
+
+# The number of iterations per test.
+my $longIterCount = 10;
+my $shortIterCount = 3;
+
+# C compiler to use in compilation tests.
+my $cCompiler = 'gcc';
+
+# Establish full paths to directories. These need to be full pathnames
+# (or do they, any more?). They can be set in env.
+# variables whose names are the first parameter to getDir() below.
+my $BASEDIR = `pwd`;
+chomp($BASEDIR);
+
+# Directory where the test programs live.
+my $BINDIR = getDir('UB_BINDIR', $BASEDIR . "/pgms");
+
+# Temp directory, for temp files.
+my $TMPDIR = getDir('UB_TMPDIR', $BASEDIR . "/tmp");
+
+# Directory to put results in.
+my $RESULTDIR = getDir('UB_RESULTDIR', $BASEDIR . "/results");
+
+# Directory where the tests are executed.
+my $TESTDIR = getDir('UB_TESTDIR', $BASEDIR . "/testdir");
+
+
+############################################################################
+# TEST SPECIFICATIONS
+############################################################################
+
+# Configure the categories to which tests can belong.
+my $testCats = {
+ 'system' => { 'name' => "System Benchmarks", 'maxCopies' => 16 },
+ '2d' => { 'name' => "2D Graphics Benchmarks", 'maxCopies' => 1 },
+ '3d' => { 'name' => "3D Graphics Benchmarks", 'maxCopies' => 1 },
+ 'misc' => { 'name' => "Non-Index Benchmarks", 'maxCopies' => 16 },
+};
+
+
+my $arithmetic = [
+ "arithoh", "short", "int", "long", "float", "double", "whetstone-double"
+];
+
+my $fs = [
+ "fstime-w", "fstime-r", "fstime",
+ "fsbuffer-w", "fsbuffer-r", "fsbuffer",
+ "fsdisk-w", "fsdisk-r", "fsdisk"
+];
+
+my $oldsystem = [
+ "execl", "fstime", "fsbuffer", "fsdisk", "pipe", "context1", "spawn",
+ "syscall"
+];
+
+my $system = [
+ @$oldsystem, "shell1", "shell8", "shell16"
+];
+
+my $index = [
+ "dhry2reg", "whetstone-double", @$oldsystem, "shell1", "shell8"
+];
+
+my $graphics = [
+ "2d-rects", "2d-ellipse", "2d-aashapes", "2d-text", "2d-blit",
+ "2d-window", "ubgears"
+];
+
+
+# List of all supported test names.
+my $testList = {
+ # Individual tests.
+ "dhry2reg" => undef,
+ "whetstone-double" => undef,
+ "syscall" => undef,
+ "pipe" => undef,
+ "context1" => undef,
+ "spawn" => undef,
+ "execl" => undef,
+ "fstime-w" => undef,
+ "fstime-r" => undef,
+ "fstime" => undef,
+ "fsbuffer-w" => undef,
+ "fsbuffer-r" => undef,
+ "fsbuffer" => undef,
+ "fsdisk-w" => undef,
+ "fsdisk-r" => undef,
+ "fsdisk" => undef,
+ "shell1" => undef,
+ "shell8" => undef,
+ "shell16" => undef,
+ "short" => undef,
+ "int" => undef,
+ "long" => undef,
+ "float" => undef,
+ "double" => undef,
+ "arithoh" => undef,
+ "C" => undef,
+ "dc" => undef,
+ "hanoi" => undef,
+ "grep" => undef,
+ "sysexec" => undef,
+
+ "2d-rects" => undef,
+ "2d-lines" => undef,
+ "2d-circle" => undef,
+ "2d-ellipse" => undef,
+ "2d-shapes" => undef,
+ "2d-aashapes" => undef,
+ "2d-polys" => undef,
+ "2d-text" => undef,
+ "2d-blit" => undef,
+ "2d-window" => undef,
+
+ "ubgears" => undef,
+
+ # Named combos and shorthands.
+ "arithmetic" => $arithmetic,
+ "dhry" => [ "dhry2reg" ],
+ "dhrystone" => [ "dhry2reg" ],
+ "whets" => [ "whetstone-double" ],
+ "whetstone" => [ "whetstone-double" ],
+ "load" => [ "shell" ],
+ "misc" => [ "C", "dc", "hanoi" ],
+ "speed" => [ @$arithmetic, @$system ],
+ "oldsystem" => $oldsystem,
+ "system" => $system,
+ "fs" => $fs,
+ "shell" => [ "shell1", "shell8", "shell16" ],
+ "graphics" => $graphics,
+
+ # The tests which constitute the official index.
+ "index" => $index,
+
+ # The tests which constitute the official index plus the graphics
+ # index.
+ "gindex" => [ @$index, @$graphics ],
+};
+
+
+# Default parameters for benchmarks. Note that if "prog" is used,
+# it must contain just the program name, as it will be quoted (this
+# is necessary if BINDIR contains spaces). Put any options in "options".
+my $baseParams = {
+ "prog" => undef,
+ "options" => "",
+ "repeat" => 'short',
+ "stdout" => 1, # Non-0 to keep stdout.
+ "stdin" => "",
+ "logmsg" => "",
+};
+
+
+# Individual parameters for all benchmarks.
+my $testParams = {
+
+ ##########################
+ ## System Benchmarks ##
+ ##########################
+
+ "dhry2reg" => {
+ "logmsg" => "Dhrystone 2 using register variables",
+ "cat" => 'system',
+ "options" => "10",
+ "repeat" => 'long',
+ },
+ "whetstone-double" => {
+ "logmsg" => "Double-Precision Whetstone",
+ "cat" => 'system',
+ "repeat" => 'long',
+ },
+ "syscall" => {
+ "logmsg" => "System Call Overhead",
+ "cat" => 'system',
+ "repeat" => 'long',
+ "options" => "10",
+ },
+ "context1" => {
+ "logmsg" => "Pipe-based Context Switching",
+ "cat" => 'system',
+ "repeat" => 'long',
+ "options" => "10",
+ },
+ "pipe" => {
+ "logmsg" => "Pipe Throughput",
+ "cat" => 'system',
+ "repeat" => 'long',
+ "options" => "10",
+ },
+ "spawn" => {
+ "logmsg" => "Process Creation",
+ "cat" => 'system',
+ "options" => "30",
+ },
+ "execl" => {
+ "logmsg" => "Execl Throughput",
+ "cat" => 'system',
+ "options" => "30",
+ },
+ "fstime-w" => {
+ "logmsg" => "File Write 1024 bufsize 2000 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-w -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000",
+ },
+ "fstime-r" => {
+ "logmsg" => "File Read 1024 bufsize 2000 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-r -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000",
+ },
+ "fstime" => {
+ "logmsg" => "File Copy 1024 bufsize 2000 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-c -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000",
+ },
+ "fsbuffer-w" => {
+ "logmsg" => "File Write 256 bufsize 500 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-w -t 30 -d \"${TMPDIR}\" -b 256 -m 500",
+ },
+ "fsbuffer-r" => {
+ "logmsg" => "File Read 256 bufsize 500 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-r -t 30 -d \"${TMPDIR}\" -b 256 -m 500",
+ },
+ "fsbuffer" => {
+ "logmsg" => "File Copy 256 bufsize 500 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-c -t 30 -d \"${TMPDIR}\" -b 256 -m 500",
+ },
+ "fsdisk-w" => {
+ "logmsg" => "File Write 4096 bufsize 8000 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-w -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000",
+ },
+ "fsdisk-r" => {
+ "logmsg" => "File Read 4096 bufsize 8000 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-r -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000",
+ },
+ "fsdisk" => {
+ "logmsg" => "File Copy 4096 bufsize 8000 maxblocks",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/fstime",
+ "options" => "-c -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000",
+ },
+ "shell1" => {
+ "logmsg" => "Shell Scripts (1 concurrent)",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/looper",
+ "options" => "60 \"${BINDIR}/multi.sh\" 1",
+ },
+ "shell8" => {
+ "logmsg" => "Shell Scripts (8 concurrent)",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/looper",
+ "options" => "60 \"${BINDIR}/multi.sh\" 8",
+ },
+ "shell16" => {
+ "logmsg" => "Shell Scripts (16 concurrent)",
+ "cat" => 'system',
+ "prog" => "${BINDIR}/looper",
+ "options" => "60 \"${BINDIR}/multi.sh\" 16",
+ },
+
+ ##########################
+ ## Graphics Benchmarks ##
+ ##########################
+
+ "2d-rects" => {
+ "logmsg" => "2D graphics: rectangles",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "rects 3 2",
+ },
+
+ "2d-lines" => {
+ "logmsg" => "2D graphics: lines",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "lines 3 2",
+ },
+
+ "2d-circle" => {
+ "logmsg" => "2D graphics: circles",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "circle 3 2",
+ },
+
+ "2d-ellipse" => {
+ "logmsg" => "2D graphics: ellipses",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "ellipse 3 2",
+ },
+
+ "2d-shapes" => {
+ "logmsg" => "2D graphics: polygons",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "shapes 3 2",
+ },
+
+ "2d-aashapes" => {
+ "logmsg" => "2D graphics: aa polygons",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "aashapes 3 2",
+ },
+
+ "2d-polys" => {
+ "logmsg" => "2D graphics: complex polygons",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "polys 3 2",
+ },
+
+ "2d-text" => {
+ "logmsg" => "2D graphics: text",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "text 3 2",
+ },
+
+ "2d-blit" => {
+ "logmsg" => "2D graphics: images and blits",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "blit 3 2",
+ },
+
+ "2d-window" => {
+ "logmsg" => "2D graphics: windows",
+ "cat" => '2d',
+ "prog" => "${BINDIR}/gfx-x11",
+ "options" => "window 3 2",
+ },
+
+ "ubgears" => {
+ "logmsg" => "3D graphics: gears",
+ "cat" => '3d',
+ "options" => "-time 20 -v",
+ },
+
+
+ ##########################
+ ## Non-Index Benchmarks ##
+ ##########################
+
+ "C" => {
+ "logmsg" => "C Compiler Throughput ($cCompiler)",
+ "cat" => 'misc',
+ "prog" => "${BINDIR}/looper",
+ "options" => "60 $cCompiler cctest.c",
+ },
+ "arithoh" => {
+ "logmsg" => "Arithoh",
+ "cat" => 'misc',
+ "options" => "10",
+ },
+ "short" => {
+ "logmsg" => "Arithmetic Test (short)",
+ "cat" => 'misc',
+ "options" => "10",
+ },
+ "int" => {
+ "logmsg" => "Arithmetic Test (int)",
+ "cat" => 'misc',
+ "options" => "10",
+ },
+ "long" => {
+ "logmsg" => "Arithmetic Test (long)",
+ "cat" => 'misc',
+ "options" => "10",
+ },
+ "float" => {
+ "logmsg" => "Arithmetic Test (float)",
+ "cat" => 'misc',
+ "options" => "10",
+ },
+ "double" => {
+ "logmsg" => "Arithmetic Test (double)",
+ "cat" => 'misc',
+ "options" => "10",
+ },
+ "dc" => {
+ "logmsg" => "Dc: sqrt(2) to 99 decimal places",
+ "cat" => 'misc',
+ "prog" => "${BINDIR}/looper",
+ "options" => "30 dc",
+ "stdin" => "dc.dat",
+ },
+ "hanoi" => {
+ "logmsg" => "Recursion Test -- Tower of Hanoi",
+ "cat" => 'misc',
+ "options" => "20",
+ },
+ "grep" => {
+ "logmsg" => "Grep a large file (system's grep)",
+ "cat" => 'misc',
+ "prog" => "${BINDIR}/looper",
+ "options" => "30 grep -c gimp large.txt",
+ },
+ "sysexec" => {
+ "logmsg" => "Exec System Call Overhead",
+ "cat" => 'misc',
+ "repeat" => 'long',
+ "prog" => "${BINDIR}/syscall",
+ "options" => "10 exec",
+ },
+};
+
+
+# CPU flags of interest: maps an x86 feature token, exactly as it appears
+# in the "flags" field of /proc/cpuinfo, to a human-readable description.
+# Used by processCpuFlags() when building the system banner; any flag not
+# listed here is silently ignored.
+my $x86CpuFlags = {
+ 'pae' => "Physical Address Ext",
+ 'sep' => "SYSENTER/SYSEXIT",
+ 'syscall' => "SYSCALL/SYSRET",
+ 'mmx' => "MMX",
+ 'mmxext' => "AMD MMX",
+ 'cxmmx' => "Cyrix MMX",
+ 'xmm' => "Streaming SIMD",
+ 'xmm2' => "Streaming SIMD-2",
+ 'xmm3' => "Streaming SIMD-3",
+ 'ht' => "Hyper-Threading",
+ 'ia64' => "IA-64 processor",
+ 'lm' => "x86-64",
+ 'vmx' => "Intel virtualization",
+ 'svm' => "AMD virtualization",
+};
+
+
+############################################################################
+# UTILITIES
+############################################################################
+
+# Exec the given command and capture its standard output.
+# Returns an array of ( child PID, filehandle on the child's stdout ).
+# The caller must drain the filehandle and reap the child with waitpid().
+sub command {
+    my ( $cmd ) = @_;
+
+    my $childFd;
+    my $pid = open($childFd, "-|");
+    die("Run: fork() failed (undef)\n") unless defined($pid);
+
+    if ($pid == 0) {
+        # Child side: replace ourselves with the command; exec() only
+        # ever returns if it failed to start the program.
+        exec($cmd);
+        die("Run: exec() failed (returned)\n");
+    }
+
+    ( $pid, $childFd );
+}
+
+
+# Get data from running a system command. Used for things like getting
+# the host OS from `uname -o` etc.
+#
+# Ignores initial blank lines from the command and returns the first
+# non-blank line, with white space trimmed off. Returns a blank string
+# if there is no output; undef if the command fails.
+sub getCmdOutput {
+ my ( $cmd ) = @_;
+
+ # Discard stderr; we only want the first useful stdout line.
+ my ( $pid, $fd ) = command($cmd . " 2>/dev/null");
+ my $result = "";
+ while (<$fd>) {
+ chomp;
+ next if /^[ \t]*$/;
+
+ # First non-blank line: trim surrounding whitespace and stop reading.
+ $result = $_;
+ $result =~ s/^[ \t]+//;
+ $result =~ s/[ \t]+$//;
+ last;
+ }
+
+ # Close the command and wait for it to die.
+ waitpid($pid, 0);
+ my $status = $?;
+
+ # A non-zero exit status makes the output untrustworthy.
+ return $status == 0 ? $result : undef;
+}
+
+
+# Get a directory pathname from an environment variable, or the given
+# default.  Canonicalise the value (by cd'ing there and asking the shell
+# where we are), store it back into the environment, and return it.
+#   $var   name of the environment variable to consult (and update)
+#   $def   default path used when the variable is unset or empty
+sub getDir {
+    my ( $var, $def ) = @_;
+
+    my $val = $ENV{$var} || $def;
+
+    # Canonicalise the value.  Abort if the directory is inaccessible:
+    # previously a failed chdir() was ignored, silently canonicalising
+    # the value to the *current* directory instead.
+    my $wd;
+    chomp($wd = `pwd`);
+    chdir($val) || die("Run: can't cd to \"$val\"\n");
+    chomp($val = `pwd`);
+    chdir($wd) || die("Run: can't cd back to \"$wd\"\n");
+    $ENV{$var} = $val;
+
+    $val;
+}
+
+
+# Get the name of the file we're going to log to. The name uses the hostname
+# and date, plus a sequence number to make it unique.
+# $sysInfo is the hash from getSystemInfo(); only its 'name' (hostname)
+# field is used.  Relies on the file-global $RESULTDIR and POSIX strftime.
+sub logFile {
+ my ( $sysInfo ) = @_;
+
+ my $count = 1;
+
+ # Use the date in the base file name.
+ my $ymd = strftime "%Y-%m-%d", localtime;
+
+ # Bump the trailing sequence number until the name is unused.
+ while (1) {
+ my $log = sprintf "%s/%s-%s-%02d",
+ ${RESULTDIR}, $sysInfo->{'name'}, $ymd, $count;
+ return $log if (! -e $log);
+ ++$count;
+ }
+}
+
+
+# Append a printf-formatted message to the named log file.  The file is
+# re-opened on every call rather than held open, because benchmark
+# commands also append to it via shell redirection and the two streams
+# must interleave cleanly.
+sub printLog {
+    my ( $logFile, @args ) = @_;
+
+    open(my $fd, ">>", $logFile)
+        or abortRun("can't append to $logFile");
+    printf $fd @args;
+    close($fd);
+}
+
+
+# Format a count of items, auto-selecting the singular or plural form.
+# $plural defaults to $what . "s"; an undefined count formats as
+# "unknown <plural>".
+sub number {
+    my ( $n, $what, $plural ) = @_;
+
+    $plural = $what . "s" unless defined($plural);
+
+    return sprintf "unknown %s", $plural unless defined($n);
+    sprintf "%d %s", $n, ($n == 1 ? $what : $plural);
+}
+
+
+# Merge two sets of test parameters -- defaults and actual parameters.
+# Entries in $vals override same-named entries in $def.  Returns a new
+# hash ref; neither argument is modified.
+sub mergeParams {
+    my ( $def, $vals ) = @_;
+
+    # A list-flattened copy: later keys (from $vals) win.
+    return { %$def, %$vals };
+}
+
+
+############################################################################
+# SYSTEM ANALYSIS
+############################################################################
+
+# Extract interesting flags from the given processor flags string (the
+# "flags" field of /proc/cpuinfo) and convert them to descriptive names
+# via the $x86CpuFlags table.  Unknown flags are dropped; the result is
+# a single comma-separated string, ordered by sorted flag token.
+sub processCpuFlags {
+    my ( $flagStr ) = @_;
+
+    join(", ",
+         grep { $_ }
+         map  { $x86CpuFlags->{$_} }
+         sort split(/\s+/, $flagStr));
+}
+
+
+# Get information on the CPUs in the system. Returns a reference to an
+# array of N entries, one per CPU, where each entry is a hash containing
+# these fields:
+# model CPU model string, with runs of spaces collapsed
+# bogo BogoMIPS rating as reported by the kernel
+# flags descriptive flag names (see processCpuFlags())
+# Returns undef if the information can't be got (no /proc/cpuinfo).
+sub getCpuInfo {
+ open(my $fd, "<", "/proc/cpuinfo") || return undef;
+
+ my $cpus = [ ];
+ my $cpu = 0;
+ while (<$fd>) {
+ chomp;
+ # Each line is "field : value"; skip malformed/blank lines.
+ my ( $field, $val ) = split(/[ \t]*:[ \t]*/);
+ next if (!$field || !$val);
+ if ($field eq "processor") {
+ # "processor" introduces the next CPU's stanza.
+ $cpu = $val;
+ } elsif ($field eq "model name") {
+ my $model = $val;
+ $model =~ s/ +/ /g;
+ $cpus->[$cpu]{'model'} = $model;
+ } elsif ($field eq "bogomips") {
+ $cpus->[$cpu]{'bogo'} = $val;
+ } elsif ($field eq "flags") {
+ $cpus->[$cpu]{'flags'} = processCpuFlags($val);
+ }
+ }
+
+ close($fd);
+
+ $cpus;
+}
+
+
+# Get information on the host system. Returns a reference to a hash
+# with the following fields:
+# name Host name
+# os Host OS name
+# osRel Host OS release
+# osVer Host OS version
+# mach Host machine name (eg. "SparcStation 20", but on
+# PC/Linux usually "i686" etc.)
+# platform Hardware platform; on Linux, the base CPU type?
+# system System name (eg. hostname and Linux distro, like
+# "hostname: openSUSE 10.2 (i586)").
+# cpus Value returned by getCpuInfo(), undef if not avail.
+# numCpus Number of CPUs if known, else undef.
+# load System load message as per "uptime".
+# numUsers Number of users and/or open shell sessions.
+# Also sets: language, graphics, runlevel (see below).
+sub getSystemInfo {
+ my $info = { };
+
+ # Get host system data.
+ $info->{'name'} = getCmdOutput("hostname");
+ $info->{'os'} = getCmdOutput("uname -o") || getCmdOutput("uname -s");
+ $info->{'osRel'} = getCmdOutput("uname -r");
+ $info->{'osVer'} = getCmdOutput("uname -v");
+ $info->{'mach'} = getCmdOutput("uname -m");
+ $info->{'platform'} = getCmdOutput("uname -i");
+
+ # Get the system name (SUSE, Red Hat, etc.) if possible.
+ $info->{'system'} = $info->{'os'};
+ if ( -r "/etc/SuSE-release" ) {
+ $info->{'system'} = getCmdOutput("cat /etc/SuSE-release");
+ } elsif ( -r "/etc/release" ) {
+ $info->{'system'} = getCmdOutput("cat /etc/release");
+ }
+
+ # Get the language info.
+ # NOTE(review): getCmdOutput() returns undef when a command fails, and
+ # the s/// substitutions below would then operate on undef -- assumes
+ # "locale" is always available; confirm for minimal environments.
+ my $lang = getCmdOutput("printenv LANG");
+ my $map = getCmdOutput("locale -k LC_CTYPE | grep charmap");
+ $map =~ s/.*=//;
+ my $coll = getCmdOutput("locale -k LC_COLLATE | grep collate-codeset");
+ $coll =~ s/.*=//;
+ $info->{'language'} = sprintf "%s (charmap=%s, collate=%s)",
+ $lang, $map, $coll;
+
+ # Get details on the CPUs, if possible.
+ my $cpus = getCpuInfo();
+ if (defined($cpus)) {
+ $info->{'cpus'} = $cpus;
+ $info->{'numCpus'} = scalar(@$cpus);
+ }
+
+ # Get graphics hardware info.
+ $info->{'graphics'} = getCmdOutput("3dinfo | cut -f1 -d\'(\'");
+
+ # Get system run state, load and usage info.
+ $info->{'runlevel'} = getCmdOutput("runlevel | cut -f2 -d\" \"");
+ $info->{'load'} = getCmdOutput("uptime");
+ $info->{'numUsers'} = getCmdOutput("who | wc -l");
+
+ $info;
+}
+
+
+############################################################################
+# ERROR HANDLING
+############################################################################
+
+# Abort the benchmarking run: write the given error message to stderr,
+# then exit with a failure status.  Never returns.
+sub abortRun {
+    my ( $err ) = @_;
+
+    print STDERR "\n**********************************************\n";
+    print STDERR "Run: $err; aborting\n";
+    exit(1);
+}
+
+
+############################################################################
+# TEST SETUP
+############################################################################
+
+# Do checks that everything's ready for testing.
+# Sets LANG from the file-global $language, builds the benchmark
+# binaries if "make check" reports them missing, and drops a helper
+# script into $TMPDIR that can kill this run.
+sub preChecks {
+ # Set the language.
+ $ENV{'LANG'} = $language;
+
+ # Check that the required files are in the proper places.
+ system("make check");
+ if ($? != 0) {
+ # Binaries missing or stale: rebuild everything, abort on failure.
+ system("make all");
+ if ($? != 0) {
+ abortRun("\"make all\" failed");
+ }
+ }
+
+ # Create a script to kill this run.  $$ is our own PID.
+ system("echo \"kill -9 $$\" > \"${TMPDIR}/kill_run\"");
+ chmod(0755, $TMPDIR . "/kill_run");
+}
+
+
+# Parse the command arguments.
+# Recognised options:
+#   -q          quiet mode      (verbose = 0)
+#   -v          verbose mode    (verbose = 2)
+#   -i <n>      iteration count for long-running tests
+#   -c <n>      number of parallel copies (repeatable; accumulates)
+# Any other non-flag word is a test or suite name looked up in the
+# file-global $testList ("all" expands to every plain test).
+# Returns a hash ref with keys: tests, verbose, iterations, copies.
+sub parseArgs {
+ my @words = @_;
+
+ # The accumulator for the bench units to be run.
+ my $tests = [ ];
+ my $params = { 'tests' => $tests };
+
+ # Generate the requested list of bench programs.
+ my $opt; # NOTE(review): unused variable
+ my $word;
+ while ($word = shift(@words)) {
+ if ($word !~ m/^-/) { # A test name.
+ if ($word eq "all") {
+ # "all": every $testList entry with an undefined value
+ # (i.e. a plain test, not a named suite of tests).
+ foreach my $t (keys(%$testList)) {
+ push(@$tests, $t) if (!defined($testList->{$t}));
+ }
+ } elsif (exists($testList->{$word})) {
+ # A suite maps to an array of tests; a plain test to itself.
+ my $val = $testList->{$word} || [ $word ];
+ push(@$tests, @$val);
+ } else {
+ die("Run: unknown test \"$word\"\n");
+ }
+ } elsif ($word eq "-q") {
+ $params->{'verbose'} = 0;
+ } elsif ($word eq "-v") {
+ $params->{'verbose'} = 2;
+ } elsif ($word eq "-i") {
+ $params->{'iterations'} = shift(@words);
+ } elsif ($word eq "-c") {
+ if (!defined($params->{'copies'})) {
+ $params->{'copies'} = [ ];
+ }
+ push(@{$params->{'copies'}}, shift(@words));
+ } else {
+ die("Run: unknown option $word\n");
+ }
+ }
+
+ $params;
+}
+
+
+############################################################################
+# RESULTS INPUT / OUTPUT
+############################################################################
+
+# Read a set of benchmarking results from the given file.
+# Returns results in the form returned by runTests(), but without the
+# individual pass results.
+# File format: one pipe-separated record per line,
+#   name|time|scorelabel|sum|score|iterations
+# with "#" comments and blank lines ignored.  (The "sum" field is
+# parsed but not stored.)  Returns undef if the file can't be opened.
+sub readResultsFromFile {
+ my ( $file ) = @_;
+
+ # Attempt to get the baseline data file; if we can't, just return undef.
+ open(my $fd, "<", $file) || return undef;
+
+ my $results = { };
+ while (<$fd>) {
+ chomp;
+
+ # Dump comments, ignore blank lines.
+ s/#.*//;
+ next if /^\s*$/;
+
+ my ( $name, $time, $slab, $sum, $score, $iters ) = split(/\|/);
+ my $bresult = { };
+ $bresult->{'score'} = $score;
+ $bresult->{'scorelabel'} = $slab;
+ $bresult->{'time'} = $time;
+ $bresult->{'iterations'} = $iters;
+
+ $results->{$name} = $bresult;
+ }
+
+ close($fd);
+
+ $results;
+}
+
+
+############################################################################
+# RESULTS PROCESSING
+############################################################################
+
+# Process a set of results from a single test by averaging the individual
+# pass results into a single final value.
+# First, though, dump the worst 1/3 of the scores. The logic is that a
+# glitch in the system (background process waking up, for example) may
+# make one or two runs go slow, so let's discard those.
+#
+# $bresult is a hashed array representing the results of a single test;
+# $bresult->{'passes'} is an array of the output from the individual
+# passes.
+# On success this fills in $bresult->{score/scorelabel/time/iterations};
+# the final score is the geometric mean of the per-pass rates (computed
+# via summed logs).  ($bench is currently unused; $sum accumulates an
+# arithmetic total that is never read.)
+sub combinePassResults {
+ my ( $bench, $tdata, $bresult, $logFile ) = @_;
+
+ $bresult->{'cat'} = $tdata->{'cat'};
+
+ # Computed results.
+ my $iterations = 0;
+ my $totalTime = 0;
+ my $sum = 0;
+ my $product = 0;
+ my $label;
+
+ my $pres = $bresult->{'passes'};
+
+ # We're going to throw away the worst 1/3 of the pass results.
+ # Figure out how many to keep.
+ my $npasses = scalar(@$pres);
+ my $ndump = int($npasses / 3);
+
+ # Ascending sort by raw count: the first $ndump (lowest-scoring)
+ # passes are the ones discarded.
+ foreach my $presult (sort { $a->{'COUNT0'} <=> $b->{'COUNT0'} } @$pres) {
+ my $count = $presult->{'COUNT0'};
+ my $timebase = $presult->{'COUNT1'};
+ $label = $presult->{'COUNT2'};
+ my $time = $presult->{'TIME'} || $presult->{'elapsed'};
+
+ # Skip this result if it's one of the worst ones.
+ if ($ndump > 0) {
+ printLog($logFile, "*Dump score: %12.1f\n", $count);
+ --$ndump;
+ next;
+ }
+
+ # Count this result.
+ ++$iterations;
+ printLog($logFile, "Count score: %12.1f\n", $count);
+
+ # If $timebase is 0 the figure is a rate; else compute
+ # counts per $timebase. $time is always seconds.
+ if ($timebase > 0) {
+ $sum += $count / ($time / $timebase);
+ $product += log($count) - log($time / $timebase);
+ } else {
+ $sum += $count;
+ $product += log($count);
+ }
+ $totalTime += $time;
+ }
+
+ # Save the results for the benchmark.
+ if ($iterations > 0) {
+ # exp(mean of logs) == geometric mean of the kept pass scores.
+ $bresult->{'score'} = exp($product / $iterations);
+ $bresult->{'scorelabel'} = $label;
+ $bresult->{'time'} = $totalTime / $iterations;
+ $bresult->{'iterations'} = $iterations;
+ } else {
+ $bresult->{'error'} = "No measured results";
+ }
+}
+
+
+# Index the given full benchmark results against the baseline results.
+# $results is a hashed array of test names to test results.
+#
+# Adds the following fields to each benchmark result:
+# iscore The baseline score for this test
+# index The index of this test against the baseline
+# Adds the following fields to $results:
+# indexed The number of tests for which index values were
+# generated
+# fullindex Non-0 if all the index tests were indexed
+# index The computed overall index for the run
+# Note that the index values are computed as
+# result / baseline * 10
+# so an index of 523 indicates that a test ran 52.3 times faster than
+# the baseline.
+# Baseline data is read from $BINDIR/index.base; the per-category
+# overall index is the geometric mean of the per-test ratios, times 10.
+sub indexResults {
+ my ( $results ) = @_;
+
+ # Read in the baseline result data. If we can't get it, just return
+ # without making indexed results.
+ my $index = readResultsFromFile($BINDIR . "/index.base");
+ if (!defined($index)) {
+ return;
+ }
+
+ # Count the number of results we have (indexed or not) in
+ # each category.
+ my $numCat = { };
+ foreach my $bench (@{$results->{'list'}}) {
+ my $bresult = $results->{$bench};
+ ++$numCat->{$bresult->{'cat'}};
+ }
+ $results->{'numCat'} = $numCat;
+
+ my $numIndex = { };
+ my $indexed = { };
+ my $sum = { };
+ foreach my $bench (sort(keys(%$index))) {
+ # Get the test data for this benchmark.
+ my $tdata = $testParams->{$bench};
+ if (!defined($tdata)) {
+ abortRun("unknown benchmark \"$bench\" in $BINDIR/index.base");
+ }
+
+ # Get the test category. Count the total tests in this cat.
+ my $cat = $tdata->{'cat'};
+ ++$numIndex->{$cat};
+
+ # If we don't have a result for this test, skip.
+ next if (!defined($results->{$bench}));
+
+ # Get the index and actual results. Calculate the score.
+ my $iresult = $index->{$bench};
+ my $bresult = $results->{$bench};
+ my $ratio = $bresult->{'score'} / $iresult->{'score'};
+
+ # Save the indexed score.
+ $bresult->{'iscore'} = $iresult->{'score'};
+ $bresult->{'index'} = $ratio * 10;
+
+ # Sum the log ratios, and count this test for this category.
+ $sum->{$cat} += log($ratio);
+ ++$indexed->{$cat};
+ }
+
+ # Calculate the index scores per category (geometric mean * 10).
+ $results->{'indexed'} = $indexed;
+ $results->{'numIndex'} = $numIndex;
+ foreach my $c (keys(%$indexed)) {
+ if ($indexed->{$c} > 0) {
+ $results->{'index'}{$c} = exp($sum->{$c} / $indexed->{$c}) * 10;
+ }
+ }
+}
+
+
+############################################################################
+# TEST EXECUTION
+############################################################################
+
+# Exec the given command in a sub-process.
+#
+# In the child process, we run the command and store its standard output.
+# We also time its execution, and catch its exit status. We then write
+# the command's output, plus lines containing the execution time and status,
+# to a pipe.  The extra lines are "elapsed|<seconds>" and "status|<code>",
+# in the pipe-separated record format that readResults() parses.
+#
+# In the parent process, we immediately return an array containing the
+# child PID and the filehandle to the pipe. This allows the caller to
+# kick off multiple commands in parallel, then gather their output.
+sub commandBuffered {
+ my ( $cmd ) = @_;
+
+ # Create a pipe for parent-child communication.
+ my $childReader;
+ my $parentWriter;
+ pipe($childReader, $parentWriter) || abortRun("pipe() failed");
+ $parentWriter->autoflush(1);
+
+ # Fork off the child process.
+ my $pid = fork();
+ if (!defined($pid)) {
+ abortRun("fork() failed (undef)");
+ } elsif ($pid == 0) {
+ # Close the other end of the pipe.
+ close $childReader;
+
+ # Start the clock and spawn the command.
+ my $benchStart = Time::HiRes::time();
+ my ( $cmdPid, $cmdFd ) = command($cmd);
+
+ # Read and buffer all the command's output.
+ my $output = [ ];
+ while (<$cmdFd>) {
+ push(@$output, $_);
+ }
+
+ # Stop the clock and save the time.
+ my $elTime = Time::HiRes::time() - $benchStart;
+ push(@$output, sprintf "elapsed|%f\n", $elTime);
+
+ # Wait for the child to die so we can get its status.
+ # close($cmdFd); Doesn't work???
+ waitpid($cmdPid, 0);
+ my $status = $?;
+ push(@$output, sprintf "status|%d\n", $status);
+
+ # Now that we've got the time, play back all the output to the pipe.
+ # The parent can read this at its leisure.
+ foreach my $line (@$output) {
+ print $parentWriter $line;
+ }
+
+ # Terminate this child.
+ close $parentWriter;
+ exit(0);
+ }
+
+ # Close the other end of the pipe.
+ close $parentWriter;
+
+ return ( $pid, $childReader );
+}
+
+
+# Read the results of a benchmark execution from a child process, given
+# its process ID and its filehandle. Create a results hash structure
+# containing the fields returned by the child, plus:
+# pid The child's process ID
+# status The child's exit status
+# ERROR Any stderr output from the child that isn't result data
+# Note that any result fields with multiple values are split; so eg.
+# COUNT|x|y|z
+# becomes
+# COUNT0 = x
+# COUNT1 = y
+# COUNT2 = z
+sub readResults {
+ my ( $pid, $fd ) = @_;
+
+ my $presult = { 'pid' => $pid };
+
+ # Read all the result lines from the child.
+ while (<$fd>) {
+ chomp;
+
+ # Records are pipe-separated: "field|value[|value...]".
+ my ( $field, @params ) = split(/\|/);
+ if (scalar(@params) == 0) { # Error message.
+ $presult->{'ERROR'} .= "\n" if ($presult->{'ERROR'});
+ $presult->{'ERROR'} .= $field;
+ } elsif (scalar(@params) == 1) { # Simple data.
+ $presult->{$field} = $params[0];
+ } else { # Compound data.
+ # Store the values in separate fields, named "FIELD$i".
+ for (my $x = 0; $x < scalar(@params); ++$x) {
+ $presult->{$field . $x} = $params[$x];
+ }
+ }
+ }
+
+ # If the command had an error, make an appropriate message if we
+ # don't have one.
+ if ($presult->{'status'} != 0 && !defined($presult->{'ERROR'})) {
+ $presult->{'ERROR'} = "command returned status " . $presult->{'status'};
+ }
+
+ # Wait for the child to die.
+ close($fd);
+ waitpid($pid, 0);
+
+ $presult;
+}
+
+
+# Execute a benchmark command: start $copies instances in parallel (to
+# exercise multiple CPUs), then gather one result hash per copy, as
+# produced by readResults().  Returns a ref to the array of results.
+sub executeBenchmark {
+    my ( $command, $copies ) = @_;
+
+    # Start every copy before reading any output.  The children buffer
+    # their own output and timings, so no select() multiplexing is
+    # needed on this side.
+    my @procs;
+    for my $i (1 .. $copies) {
+        my ( $cmdPid, $cmdFd ) = commandBuffered($command);
+        push(@procs, { 'pid' => $cmdPid, 'fd' => $cmdFd });
+    }
+
+    # Collect the results back in launch order.
+    my $pres = [ ];
+    foreach my $proc (@procs) {
+        push(@$pres, readResults($proc->{'pid'}, $proc->{'fd'}));
+    }
+
+    $pres;
+}
+
+
+# Run one iteration of a benchmark, as specified by the given benchmark
+# parameters.  Runs $copies parallel copies (in $TESTDIR) and folds them
+# into one combined pass result: summed COUNT0, averaged TIME/elapsed.
+#   $params   merged benchmark parameters; 'command' must already be set
+#   $verbose  verbosity level (> 1 echoes the command being run)
+#   $logFile  per-run log file, appended to via printLog()
+#   $copies   number of parallel copies to execute
+# Aborts the whole run if any copy reports an ERROR.
+sub runOnePass {
+    my ( $params, $verbose, $logFile, $copies ) = @_;
+
+    # Get the command to run.
+    my $command = $params->{'command'};
+    if ($verbose > 1) {
+        printf "\n";
+        printf "COMMAND: \"%s\"\n", $command;
+        printf "COPIES: \"%d\"\n", $copies;
+    }
+
+    # Remember where we are, and move to the test directory.
+    # BUG FIX: `pwd` output ends in a newline; without chomp() the later
+    # chdir($pwd) fails silently and we'd be left stranded in $TESTDIR.
+    my $pwd;
+    chomp($pwd = `pwd`);
+    chdir($TESTDIR);
+
+    # Execute N copies of the benchmark in parallel.
+    my $copyResults = executeBenchmark($command, $copies);
+    printLog($logFile, "\n");
+
+    # Move back home.
+    chdir($pwd);
+
+    # Sum up the scores of the copies.
+    my $count = 0;
+    my $time = 0;
+    my $elap = 0;
+    foreach my $res (@$copyResults) {
+        # Log the result data for each copy.
+        foreach my $k (sort(keys(%$res))) {
+            printLog($logFile, "# %s: %s\n", $k, $res->{$k});
+        }
+        printLog($logFile, "\n");
+
+        # If it failed, bomb out.
+        if (defined($res->{'ERROR'})) {
+            my $name = $params->{'logmsg'};
+            abortRun("\"$name\": " . $res->{'ERROR'});
+        }
+
+        # Count up the score; prefer the benchmark's own TIME figure,
+        # falling back to wall-clock elapsed time.
+        $count += $res->{'COUNT0'};
+        $time += $res->{'TIME'} || $res->{'elapsed'};
+        $elap += $res->{'elapsed'};
+    }
+
+    # Make up a combined result: total count, per-copy average times.
+    my $passResult = $copyResults->[0];
+    $passResult->{'COUNT0'} = $count;
+    $passResult->{'TIME'} = $time / $copies;
+    $passResult->{'elapsed'} = $elap / $copies;
+
+    $passResult;
+}
+
+
+# Run one benchmark to completion: build its shell command from the
+# merged parameters, execute the configured number of passes (each via
+# runOnePass()), and average them with combinePassResults().
+#   $bench    benchmark key (also the default binary name under $BINDIR)
+#   $tparams  per-test parameter hash from $testParams
+#   $verbose  0 quiet, 1 progress line + heartbeat, 2 full detail
+#   $logFile  per-run log file name
+#   $copies   number of parallel copies per pass
+# Returns the benchmark result hash ($bresult).
+# Uses file globals: $baseParams, $BINDIR, $TESTDIR, $shortIterCount,
+# $longIterCount.
+sub runBenchmark {
+ my ( $bench, $tparams, $verbose, $logFile, $copies ) = @_;
+
+ # Make up the actual benchmark parameters.
+ my $params = mergeParams($baseParams, $tparams);
+
+ # Make up the command string based on the parameters.
+ # stderr is merged into stdout; stdout then goes to the log file when
+ # the 'stdout' parameter is set, otherwise to /dev/null.
+ my $prog = $params->{'prog'} || $BINDIR . "/" . $bench;
+ my $command = sprintf "\"%s\" %s", $prog, $params->{'options'};
+ $command .= " < \"" . $params->{'stdin'} . "\"" if ($params->{'stdin'});
+ $command .= " 2>&1";
+ $command .= $params->{'stdout'} ? (" >> \"" . $logFile . "\"") : " > /dev/null";
+ $params->{'command'} = $command;
+
+ # Set up the benchmark results structure.
+ my $bresult = { 'name' => $bench, 'msg' => $params->{'logmsg'} };
+
+ if ($verbose > 0) {
+ printf "\n%d x %s ", $copies, $params->{'logmsg'};
+ }
+
+ printLog($logFile,
+ "\n########################################################\n");
+ printLog($logFile, "%s -- %s\n",
+ $params->{'logmsg'}, number($copies, "copy", "copies"));
+ printLog($logFile, "==> %s\n\n", $command);
+
+ # Run the test iterations, as given by the "repeat" parameter.
+ # NOTE(review): "eq" on an unset 'repeat' parameter would warn under
+ # "use warnings" -- presumably $baseParams supplies a default; confirm.
+ my $repeats = $shortIterCount;
+ $repeats = $longIterCount if $params->{'repeat'} eq 'long';
+ $repeats = 1 if $params->{'repeat'} eq 'single';
+ my $pres = [ ];
+ for (my $i = 1; $i <= $repeats; ++$i) {
+ printLog($logFile, "#### Pass %d\n\n", $i);
+
+ # make an attempt to flush buffers
+ system("sync; sleep 1; sync; sleep 2");
+ # display heartbeat
+ if ($verbose > 0) {
+ printf " %d", $i;
+ }
+
+ # Execute one pass of the benchmark.
+ my $presult = runOnePass($params, $verbose, $logFile, $copies);
+ push(@$pres, $presult);
+ }
+ $bresult->{'passes'} = $pres;
+
+ # Calculate the averaged results for this benchmark.
+ combinePassResults($bench, $tparams, $bresult, $logFile);
+
+ # Log the results.
+ if ($copies == 1) {
+ printLog($logFile, "\n>>>> Results of 1 copy\n");
+ } else {
+ printLog($logFile, "\n>>>> Sum of %d copies\n", $copies);
+ }
+ foreach my $k ( 'score', 'time', 'iterations' ) {
+ printLog($logFile, ">>>> %s: %s\n", $k, $bresult->{$k});
+ }
+ printLog($logFile, "\n");
+
+ # Some specific cleanup routines.
+ if ($bench eq "C") {
+ unlink(${TESTDIR} . "/cctest.o");
+ unlink(${TESTDIR} . "/a.out");
+ }
+
+ if ($verbose > 0) {
+ printf "\n";
+ }
+
+ $bresult;
+}
+
+
+# Run the named benchmarks.
+# Returns a results hash containing: 'start'/'end' timestamps, 'copies',
+# one entry per executed benchmark keyed by name, and 'list' -- the
+# benchmark names sorted by display message.  Index scores are added by
+# indexResults().  Uses file globals $testParams and $testCats.
+sub runTests {
+ my ( $tests, $verbose, $logFile, $copies ) = @_;
+
+ # Run all the requested tests and gather the results.
+ my $results = { 'start' => time(), 'copies' => $copies };
+ foreach my $bench (@$tests) {
+ # Get the parameters for this benchmark.
+ my $params = $testParams->{$bench};
+ if (!defined($params)) {
+ abortRun("unknown benchmark \"$bench\"");
+ }
+
+ # If the benchmark doesn't want to run with this many copies, skip it.
+ my $cat = $params->{'cat'};
+ my $maxCopies = $testCats->{$cat}{'maxCopies'};
+ next if ($copies > $maxCopies);
+
+ # Run the benchmark.
+ my $bresult = runBenchmark($bench, $params, $verbose, $logFile, $copies);
+ $results->{$bench} = $bresult;
+ }
+ $results->{'end'} = time();
+
+ # Generate a sorted list of benchmarks for which we have results.
+ # (Filter on hash-with-'msg' so bookkeeping keys are excluded.)
+ my @benches = grep {
+ ref($results->{$_}) eq "HASH" && defined($results->{$_}{'msg'})
+ } keys(%$results);
+ @benches = sort {
+ $results->{$a}{'msg'} cmp $results->{$b}{'msg'}
+ } @benches;
+ $results->{'list'} = \@benches;
+
+ # Generate index scores for the results relative to the baseline data.
+ indexResults($results);
+
+ $results;
+}
+
+
+############################################################################
+# TEXT REPORTS
+############################################################################
+
+# Display a banner indicating the configuration of the system under test
+# to the given file desc.
+# $info is the hash produced by getSystemInfo(); $fd is any writable
+# filehandle (plain-text report).
+sub displaySystem {
+ my ( $info, $fd ) = @_;
+
+ # Display basic system info.
+ printf $fd " System: %s: %s\n", $info->{'name'}, $info->{'system'};
+ printf $fd " OS: %s -- %s -- %s\n",
+ $info->{'os'}, $info->{'osRel'}, $info->{'osVer'};
+ printf $fd " Machine: %s (%s)\n", $info->{'mach'}, $info->{'platform'};
+ printf $fd " Language: %s\n", $info->{'language'};
+
+ # Get and display details on the CPUs, if possible.
+ my $cpus = $info->{'cpus'};
+ if (!defined($cpus)) {
+ printf $fd " CPU: no details available\n";
+ } else {
+ for (my $i = 0; $i <= $#$cpus; ++$i) {
+ printf $fd " CPU %d: %s (%.1f bogomips)\n",
+ $i, $cpus->[$i]{'model'}, $cpus->[$i]{'bogo'};
+ printf $fd " %s\n", $cpus->[$i]{'flags'};
+ }
+ }
+
+ if ($info->{'graphics'}) {
+ printf $fd " Graphics: %s\n", $info->{'graphics'};
+ }
+
+ # Display system load and usage info.
+ printf $fd " %s; runlevel %s\n\n", $info->{'load'}, $info->{'runlevel'};
+}
+
+
+# Write the per-test scores from the given run results to the given
+# filehandle: one formatted line per benchmark in $results->{'list'},
+# followed by a blank line.
+sub logResults {
+    my ( $results, $outFd ) = @_;
+
+    foreach my $bench (@{$results->{'list'}}) {
+        my $r = $results->{$bench};
+        # Hash slice yields the fields in exactly this order.
+        printf $outFd "%-40s %12.1f %-5s (%.1f s, %d samples)\n",
+               @$r{'msg', 'score', 'scorelabel', 'time', 'iterations'};
+    }
+
+    printf $outFd "\n";
+}
+
+
+# Display index scores, if any, for the given run results.
+# Writes the plain-text index table for one test category to $outFd:
+# header, one row per benchmark (baseline / result / index, or "---"
+# when a test has no baseline), and the overall category score.
+sub logIndexCat {
+ my ( $results, $cat, $outFd ) = @_;
+
+ my $total = $results->{'numIndex'}{$cat};
+ my $indexed = $results->{'indexed'}{$cat};
+ my $iscore = $results->{'index'}{$cat};
+ # NOTE(review): $total/$indexed may be undef here; the == comparison
+ # would warn under "use warnings" before the guard below runs.
+ my $full = $total == $indexed;
+
+ # If there are no indexed scores, just say so.
+ if (!defined($indexed) || $indexed == 0) {
+ printf $outFd "No index results available for %s\n\n",
+ $testCats->{$cat}{'name'};
+ return;
+ }
+
+ # Display the header, depending on whether we have a full set of index
+ # scores, or a partial set.
+ my $head = $testCats->{$cat}{'name'} .
+ ($full ? " Index Values" : " Partial Index");
+ printf $outFd "%-40s %12s %12s %8s\n",
+ $head, "BASELINE", "RESULT", "INDEX";
+
+ # Display the individual test scores.
+ foreach my $bench (@{$results->{'list'}}) {
+ my $bresult = $results->{$bench};
+ next if $bresult->{'cat'} ne $cat;
+
+ if (defined($bresult->{'iscore'}) && defined($bresult->{'index'})) {
+ printf $outFd "%-40s %12.1f %12.1f %8.1f\n",
+ $bresult->{'msg'}, $bresult->{'iscore'},
+ $bresult->{'score'}, $bresult->{'index'};
+ } else {
+ printf $outFd "%-40s %12s %12.1f %8s\n",
+ $bresult->{'msg'}, "---",
+ $bresult->{'score'}, "---";
+ }
+ }
+
+ # Display the overall score.
+ my $title = $testCats->{$cat}{'name'} . " Index Score";
+ if (!$full) {
+ $title .= " (Partial Only)";
+ }
+ printf $outFd "%-40s %12s %12s %8s\n", "", "", "", "========";
+ printf $outFd "%-66s %8.1f\n", $title, $iscore;
+
+ printf $outFd "\n";
+}
+
+
+# Write index scores, if any, for every category that produced indexed
+# results, delegating the per-category formatting to logIndexCat().
+sub logIndex {
+    my ( $results, $outFd ) = @_;
+
+    logIndexCat($results, $_, $outFd)
+        foreach keys(%{$results->{'indexed'}});
+}
+
+
+# Dump the given run results into the given report file.
+# Writes the run header (dates/times, CPU and copy counts), then the raw
+# scores (logResults) and indexed scores (logIndex).  ($verbose is
+# accepted but not used here.)
+sub summarizeRun {
+ my ( $systemInfo, $results, $verbose, $reportFd ) = @_;
+
+ # Display information about this test run.
+ printf $reportFd "------------------------------------------------------------------------\n";
+ printf $reportFd "Benchmark Run: %s %s - %s\n",
+ strftime("%a %b %d %Y", localtime($results->{'start'})),
+ strftime("%H:%M:%S", localtime($results->{'start'})),
+ strftime("%H:%M:%S", localtime($results->{'end'}));
+ printf $reportFd "%s in system; running %s of tests\n",
+ number($systemInfo->{'numCpus'}, "CPU"),
+ number($results->{'copies'}, "parallel copy", "parallel copies");
+ printf $reportFd "\n";
+
+ # Display the run scores.
+ logResults($results, $reportFd);
+
+ # Display the indexed scores, if any.
+ logIndex($results, $reportFd);
+}
+
+
+############################################################################
+# HTML REPORTS
+############################################################################
+
+# Write the HTML report header (document head, page title, and the BYTE
+# UNIX Benchmarks version banner) to the given report filehandle.
+# NOTE(review): this sub has been mangled in transit -- the here-doc
+# operator ("print $reportFd <" is truncated; its "<<EOF" marker and the
+# HTML tags it emitted are gone) and the two printf calls below have
+# lost their markup.  Restore from the pristine upstream Run script
+# before relying on the HTML report.
+sub runHeaderHtml {
+ my ( $systemInfo, $reportFd ) = @_;
+
+ # Display information about this test run.
+ my $title = sprintf "Benchmark of %s / %s on %s",
+ $systemInfo->{'name'}, $systemInfo->{'system'},
+ strftime("%a %b %d %Y", localtime());
+
+ print $reportFd <
+
+
+
+
+ $title
+
+
+
+EOF
+
+ # Display information about this test run.
+ printf $reportFd "
 %s
\n", $title;
+ printf $reportFd "
 BYTE UNIX Benchmarks (Version %s)
\n\n",
+ $version;
+}
+
+
+# Display a banner indicating the configuration of the system under test
+# to the given file desc.
+# NOTE(review): the body of this sub appears truncated by extraction --
+# only a closing printf remains and its markup has been stripped.  The
+# system-details markup needs to be restored from the pristine upstream
+# Run script.
+sub displaySystemHtml {
+ my ( $info, $fd ) = @_;
+
+ printf $fd "
\n\n";
+}
+
+
+# Display the test scores from the given set of test results
+# for a given category of tests.
+# Emits the HTML score table for one category: a heading (with a warning
+# when index results are missing or partial), one row per benchmark, and
+# the overall category index score when available.
+# NOTE(review): every printf below has had its HTML markup stripped and
+# its format string split across lines by extraction.  Restore the
+# markup from the pristine upstream Run script before use.
+sub logCatResultsHtml {
+ my ( $results, $cat, $fd ) = @_;
+
+ my $numIndex = $results->{'numIndex'}{$cat};
+ my $indexed = $results->{'indexed'}{$cat};
+ my $iscore = $results->{'index'}{$cat};
+ my $full = defined($indexed) && $indexed == $numIndex;
+
+ # If there are no results in this category, just ignore it.
+ if (!defined($results->{'numCat'}{$cat}) ||
+ $results->{'numCat'}{$cat} == 0) {
+ return;
+ }
+
+ # Say the category. If there are no indexed scores, just say so.
+ my $warn = "";
+ if (!defined($indexed) || $indexed == 0) {
+ $warn = " — no index results available";
+ } elsif (!$full) {
+ $warn = " — not all index tests were run;" .
+ " only a partial index score is available";
+ }
+ printf $fd "
\n";
+
+ # Display the individual test scores.
+ foreach my $bench (@{$results->{'list'}}) {
+ my $bresult = $results->{$bench};
+ next if $bresult->{'cat'} ne $cat;
+
+ printf $fd "
\n";
+ }
+
+ # Display the overall score.
+ if (defined($indexed) && $indexed > 0) {
+ my $title = $testCats->{$cat}{'name'} . " Index Score";
+ if (!$full) {
+ $title .= " (Partial Only)";
+ }
+ printf $fd "
\n";
+ printf $fd "
%s:
\n", $title;
+ printf $fd "
%.1f
\n", $iscore;
+ printf $fd "
\n";
+ }
+
+ printf $fd "
\n\n";
+}
+
+
+# Write the HTML score tables for the given run results: one section per
+# test category, delegating the formatting to logCatResultsHtml().
+sub logResultsHtml {
+    my ( $results, $fd ) = @_;
+
+    logCatResultsHtml($results, $_, $fd) foreach keys(%$testCats);
+}
+
+
+# Dump the given run results into the given report file (HTML version):
+# run timing header, score tables, and the closing legal/no-warranty
+# boilerplate.
+# NOTE(review): mangled in transit -- the printf format strings have
+# lost their markup and the no-warranty text below reads like here-doc
+# content whose "<<EOF" marker was stripped (a stray "EOF" remains at
+# the end).  $time and $verbose appear unused in what survives.
+# Restore from the pristine upstream Run script before use.
+sub summarizeRunHtml {
+ my ( $systemInfo, $results, $verbose, $reportFd ) = @_;
+
+ # Display information about this test run.
+ my $time = $results->{'end'} - $results->{'start'};
+ printf $reportFd "\n";
+ printf $reportFd "
No Warranties: This information is provided free of charge and "as
+is" without any warranty, condition, or representation of any kind,
+either express or implied, including but not limited to, any warranty
+respecting non-infringement, and the implied warranties of conditions
+of merchantability and fitness for a particular purpose. All logos or
+trademarks on this site are the property of their respective owner. In
+no event shall the author be liable for any
+direct, indirect, special, incidental, consequential or other damages
+howsoever caused whether arising in contract, tort, or otherwise,
+arising out of or in connection with the use or performance of the
+information contained on this web site.
+
+
+EOF
+}
+
+
+############################################################################
+# MAIN
+############################################################################
+
+# Top-level driver: parse arguments, prepare the environment, run each
+# requested pass (one per -c copy count, defaulting to 1 and numCpus),
+# and write the plain-text and HTML reports.  Returns the exit status.
+sub main {
+ my @args = @_;
+
+ my $params = parseArgs(@args);
+ my $verbose = $params->{'verbose'} || 1;
+ if ($params->{'iterations'}) {
+ # -i sets the long-test count; short tests get a third of it (min 1).
+ $longIterCount = $params->{'iterations'};
+ $shortIterCount = int(($params->{'iterations'} + 1) / 3);
+ $shortIterCount = 1 if ($shortIterCount < 1);
+ }
+
+ # If no benchmark units have been specified, do "index".
+ my $tests = $params->{'tests'};
+ if ($#$tests < 0) {
+ $tests = $index;
+ }
+
+ preChecks();
+ my $systemInfo = getSystemInfo();
+
+ # If the number of copies to run was not set, set it to 1
+ # and the number of CPUs in the system (if > 1).
+ my $copies = $params->{'copies'};
+ if (!$copies || scalar(@$copies) == 0) {
+ push(@$copies, 1);
+ if (defined($systemInfo->{'numCpus'}) && $systemInfo->{'numCpus'} > 1) {
+ push(@$copies, $systemInfo->{'numCpus'});
+ }
+ }
+
+ # Display the program banner.
+ system("cat \"${BINDIR}/unixbench.logo\"");
+
+ if ($verbose > 1) {
+ # NOTE(review): the join() argument to this first printf is unused
+ # ("\n" has no conversions) -- looks like leftover debugging.
+ printf "\n", join(", ", @$tests);
+ printf "Tests to run: %s\n", join(", ", @$tests);
+ }
+
+ # Generate unique file names for the report and log file.
+ my $reportFile = logFile($systemInfo);
+ my $reportHtml = $reportFile . ".html";
+ my $logFile = $reportFile . ".log";
+
+ # Open the log file for writing.
+ open(my $reportFd, ">", $reportFile) ||
+ die("Run: can't write to $reportFile\n");
+ open(my $reportFd2, ">", $reportHtml) ||
+ die("Run: can't write to $reportHtml\n");
+ printf $reportFd " BYTE UNIX Benchmarks (Version %s)\n\n", $version;
+ runHeaderHtml($systemInfo, $reportFd2);
+
+ # Dump information about the system under test.
+ displaySystem($systemInfo, $reportFd);
+ displaySystemHtml($systemInfo, $reportFd2);
+
+ # Run the tests! Do a test run once for each desired number of copies;
+ # for example, on a 2-CPU system, we may do a single-processing run
+ # followed by a dual-processing run.
+ foreach my $c (@$copies) {
+ if ($verbose > 1) {
+ printf "Run with %s\n", number($c, "copy", "copies");
+ }
+ my $results = runTests($tests, $verbose, $logFile, $c);
+
+ summarizeRun($systemInfo, $results, $verbose, $reportFd);
+ summarizeRunHtml($systemInfo, $results, $verbose, $reportFd2);
+ }
+
+ # NOTE(review): runFooterHtml() is not defined in this part of the
+ # file -- confirm it exists elsewhere in the script.
+ runFooterHtml($reportFd2);
+
+ # Finish the report.
+ close($reportFd);
+ close($reportFd2);
+
+ # Display the report, if not in quiet mode.
+ if ($verbose > 0) {
+ printf "\n";
+ printf "========================================================================\n";
+ system("cat \"$reportFile\"");
+ }
+
+ 0;
+}
+
+
+# Script entry point: run the driver with the command-line arguments and
+# propagate its status as our exit code.
+exit(main(@ARGV));
+
diff --git a/testcases/feature-test/oeaware/UnixBench/USAGE b/testcases/feature-test/oeaware/UnixBench/USAGE
new file mode 100644
index 000000000..903a83f7f
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/USAGE
@@ -0,0 +1,394 @@
+Running the Tests
+=================
+
+All the tests are executed using the "Run" script in the top-level directory.
+
+The simplest way to generate results is with the command:
+ ./Run
+
+This will run a standard "index" test (see "The BYTE Index" below), and
+save the report in the "results" directory, with a filename like
+ hostname-2007-09-23-01
+An HTML version is also saved.
+
+If you want to generate both the basic system index and the graphics index,
+then do:
+ ./Run gindex
+
+If your system has more than one CPU, the tests will be run twice -- once
+with a single copy of each test running at once, and once with N copies,
+where N is the number of CPUs. Some categories of tests, however (currently
+the graphics tests) will only run with a single copy.
+
+Since the tests are based on constant time (variable work), a "system"
+run usually takes about 29 minutes; the "graphics" part about 18 minutes.
+A "gindex" run on a dual-core machine will do 2 "system" passes (single-
+and dual-processing) and one "graphics" run, for a total around one and
+a quarter hours.
+
+============================================================================
+
+Detailed Usage
+==============
+
+The Run script takes a number of options which you can use to customise a
+test, and you can specify the names of the tests to run. The full usage
+is:
+
+ Run [ -q | -v ] [-i ] [-c [-c ...]] [test ...]
+
+The option flags are:
+
+ -q Run in quiet mode.
+ -v Run in verbose mode.
+ -i Run iterations for each test -- slower tests
+ use / 3, but at least 1. Defaults to 10 (3 for
+ slow tests).
+ -c Run copies of each test in parallel.
+
+The -c option can be given multiple times; for example:
+
+ ./Run -c 1 -c 4
+
+will run a single-streamed pass, then a 4-streamed pass. Note that some
+tests (currently the graphics tests) will only run in a single-streamed pass.
+
+The remaining non-flag arguments are taken to be the names of tests to run.
+The default is to run "index". See "Tests" below.
+
+When running the tests, I do *not* recommend switching to single-user mode
+("init 1"). This seems to change the results in ways I don't understand,
+and it's not realistic (unless your system will actually be running in this
+mode, of course). However, if using a windowing system, you may want to
+switch to a minimal window setup (for example, log in to a "twm" session),
+so that randomly-churning background processes don't randomise the results
+too much. This is particularly true for the graphics tests.
+
+
+============================================================================
+
+Tests
+=====
+
+The available tests are organised into categories; when generating index
+scores (see "The BYTE Index" below) the results for each category are
+produced separately. The categories are:
+
+ system The original Unix system tests (not all are actually
+ in the index)
+ 2d 2D graphics tests (not all are actually in the index)
+ 3d 3D graphics tests
+ misc Various non-indexed tests
+
+The following individual tests are available:
+
+ system:
+ dhry2reg Dhrystone 2 using register variables
+ whetstone-double Double-Precision Whetstone
+ syscall System Call Overhead
+ pipe Pipe Throughput
+ context1 Pipe-based Context Switching
+ spawn Process Creation
+ execl Execl Throughput
+ fstime-w File Write 1024 bufsize 2000 maxblocks
+ fstime-r File Read 1024 bufsize 2000 maxblocks
+ fstime File Copy 1024 bufsize 2000 maxblocks
+ fsbuffer-w File Write 256 bufsize 500 maxblocks
+ fsbuffer-r File Read 256 bufsize 500 maxblocks
+ fsbuffer File Copy 256 bufsize 500 maxblocks
+ fsdisk-w File Write 4096 bufsize 8000 maxblocks
+ fsdisk-r File Read 4096 bufsize 8000 maxblocks
+ fsdisk File Copy 4096 bufsize 8000 maxblocks
+ shell1 Shell Scripts (1 concurrent) (runs "looper 60 multi.sh 1")
+ shell8 Shell Scripts (8 concurrent) (runs "looper 60 multi.sh 8")
+ shell16 Shell Scripts (16 concurrent) (runs "looper 60 multi.sh 16")
+
+ 2d:
+ 2d-rects 2D graphics: rectangles
+ 2d-lines 2D graphics: lines
+ 2d-circle 2D graphics: circles
+ 2d-ellipse 2D graphics: ellipses
+ 2d-shapes 2D graphics: polygons
+ 2d-aashapes 2D graphics: aa polygons
+ 2d-polys 2D graphics: complex polygons
+ 2d-text 2D graphics: text
+ 2d-blit 2D graphics: images and blits
+ 2d-window 2D graphics: windows
+
+ 3d:
+ ubgears 3D graphics: gears
+
+ misc:
+ C C Compiler Throughput ("looper 60 $cCompiler cctest.c")
+ arithoh Arithoh (huh?)
+ short Arithmetic Test (short) (this is arith.c configured for
+ "short" variables; ditto for the ones below)
+ int Arithmetic Test (int)
+ long Arithmetic Test (long)
+ float Arithmetic Test (float)
+ double Arithmetic Test (double)
+ dc Dc: sqrt(2) to 99 decimal places (runs
+ "looper 30 dc < dc.dat", using your system's copy of "dc")
+ hanoi Recursion Test -- Tower of Hanoi
+ grep Grep for a string in a large file, using your system's
+ copy of "grep"
+ sysexec Exercise fork() and exec().
+
+The following pseudo-test names are aliases for combinations of other
+tests:
+
+ arithmetic Runs arithoh, short, int, long, float, double,
+ and whetstone-double
+ dhry Alias for dhry2reg
+ dhrystone Alias for dhry2reg
+ whets Alias for whetstone-double
+ whetstone Alias for whetstone-double
+ load Runs shell1, shell8, and shell16
+ misc Runs C, dc, and hanoi
+ speed Runs the arithmetic and system groups
+ oldsystem Runs execl, fstime, fsbuffer, fsdisk, pipe, context1,
+ spawn, and syscall
+ system Runs oldsystem plus shell1, shell8, and shell16
+ fs Runs fstime-w, fstime-r, fstime, fsbuffer-w,
+ fsbuffer-r, fsbuffer, fsdisk-w, fsdisk-r, and fsdisk
+ shell Runs shell1, shell8, and shell16
+
+ index Runs the tests which constitute the official index:
+ the oldsystem group, plus dhry2reg, whetstone-double,
+ shell1, and shell8
+ See "The BYTE Index" below for more information.
+ graphics Runs the tests which constitute the graphics index:
+ 2d-rects, 2d-ellipse, 2d-aashapes, 2d-text, 2d-blit,
+ 2d-window, and ubgears
+ gindex Runs the index and graphics groups, to generate both
+ sets of index results
+
+ all Runs all tests
+
+
+============================================================================
+
+The BYTE Index
+==============
+
+The purpose of this test is to provide a basic indicator of the performance
+of a Unix-like system; hence, multiple tests are used to test various
+aspects of the system's performance. These test results are then compared
+to the scores from a baseline system to produce an index value, which is
+generally easier to handle than the raw scores. The entire set of index
+values is then combined to make an overall index for the system.
+
+Since 1995, the baseline system has been "George", a SPARCstation 20-61
+with 128 MB RAM, a SPARC Storage Array, and Solaris 2.3, whose ratings
+were set at 10.0. (So a system which scores 520 is 52 times faster than
+this machine.) Since the numbers are really only useful in a relative
+sense, there's no particular reason to update the base system, so for the
+sake of consistency it's probably best to leave it alone. George's scores
+are in the file "pgms/index.base"; this file is used to calculate the
+index scores for any particular run.
+
+Over the years, various changes have been made to the set of tests in the
+index. Although there is a desire for a consistent baseline, various tests
+have been determined to be misleading, and have been removed; and a few
+alternatives have been added. These changes are detailed in the README,
+and should be borne in mind when looking at old scores.
+
+A number of tests are included in the benchmark suite which are not part of
+the index, for various reasons; these tests can of course be run manually.
+See "Tests" above.
+
+
+============================================================================
+
+Graphics Tests
+==============
+
+As of version 5.1, UnixBench now contains some graphics benchmarks. These
+are intended to give a rough idea of the general graphics performance of
+a system.
+
+The graphics tests are in categories "2d" and "3d", so the index scores
+for these tests are separate from the basic system index. This seems
+like a sensible division, since the graphics performance of a system
+depends largely on the graphics adaptor.
+
+The tests currently consist of some 2D "x11perf" tests and "ubgears".
+
+* The 2D tests are a selection of the x11perf tests, using the host
+ system's x11perf command (which must be installed and in the search
+ path). Only a few of the x11perf tests are used, in the interests
+ of completing a test run in a reasonable time; if you want to do
+ detailed diagnosis of an X server or graphics chip, then use x11perf
+ directly.
+
+* The 3D test is "ubgears", a modified version of the familiar "glxgears".
+ This version runs for 5 seconds to "warm up", then performs a timed
+ run and displays the average frames-per-second.
+
+On multi-CPU systems, the graphics tests will only run in single-processing
+mode. This is because the meaning of running two copies of a test at once
+is dubious; and the test windows tend to overlay each other, meaning that
+the window behind isn't actually doing any work.
+
+
+============================================================================
+
+Multiple CPUs
+=============
+
+If your system has multiple CPUs, the default behaviour is to run the selected
+tests twice -- once with one copy of each test program running at a time,
+and once with N copies, where N is the number of CPUs. (You can override
+this with the "-c" option; see "Detailed Usage" above.) This is designed to
+allow you to assess:
+
+ - the performance of your system when running a single task
+ - the performance of your system when running multiple tasks
+ - the gain from your system's implementation of parallel processing
+
+The results, however, need to be handled with care. Here are the results
+of two runs on a dual-processor system, one in single-processing mode, one
+dual-processing:
+
+ Test Single Dual Gain
+ -------------------- ------ ------ ----
+ Dhrystone 2 562.5 1110.3 97%
+ Double Whetstone 320.0 640.4 100%
+ Execl Throughput 450.4 880.3 95%
+ File Copy 1024 759.4 595.9 -22%
+ File Copy 256 535.8 438.8 -18%
+ File Copy 4096 1261.8 1043.4 -17%
+ Pipe Throughput 481.0 979.3 104%
+ Pipe-based Switching 326.8 1229.0 276%
+ Process Creation 917.2 1714.1 87%
+ Shell Scripts (1) 1064.9 1566.3 47%
+ Shell Scripts (8) 1567.7 1709.9 9%
+ System Call Overhead 944.2 1445.5 53%
+ -------------------- ------ ------ ----
+ Index Score: 678.2 1026.2 51%
+
+As expected, the heavily CPU-dependent tasks -- dhrystone, whetstone,
+execl, pipe throughput, process creation -- show close to 100% gain when
+running 2 copies in parallel.
+
+The Pipe-based Context Switching test measures context switching overhead
+by sending messages back and forth between 2 processes. I don't know why
+it shows such a huge gain with 2 copies (ie. 4 processes total) running,
+but it seems to be consistent on my system. I think this may be an issue
+with the SMP implementation.
+
+The System Call Overhead shows a lesser gain, presumably because it uses a
+lot of CPU time in single-threaded kernel code. The shell scripts test with
+8 concurrent processes shows no gain -- because the test itself runs 8
+scripts in parallel, it's already using both CPUs, even when the benchmark
+is run in single-stream mode. The same test with one process per copy
+shows a real gain.
+
+The filesystem throughput tests show a loss, instead of a gain, when
+multi-processing. That there's no gain is to be expected, since the tests
+are presumably constrained by the throughput of the I/O subsystem and the
+disk drive itself; the drop in performance is presumably down to the
+increased contention for resources, and perhaps greater disk head movement.
+
+So what tests should you use, how many copies should you run, and how should
+you interpret the results? Well, that's up to you, since it depends on
+what it is you're trying to measure.
+
+Implementation
+--------------
+
+The multi-processing mode is implemented at the level of test iterations.
+During each iteration of a test, N slave processes are started using fork().
+Each of these slaves executes the test program using fork() and exec(),
+reads and stores the entire output, times the run, and prints all the
+results to a pipe. The Run script reads the pipes for each of the slaves
+in turn to get the results and times. The scores are added, and the times
+averaged.
+
+The result is that each test program has N copies running at once. They
+should all finish at around the same time, since they run for constant time.
+
+If a test program itself starts off K multiple processes (as with the shell8
+test), then the effect will be that there are N * K processes running at
+once. This is probably not very useful for testing multi-CPU performance.
+
+
+============================================================================
+
+The Language Setting
+====================
+
+The $LANG environment variable determines how programs and library
+routines interpret text. This can have a big impact on the test results.
+
+If $LANG is set to POSIX, or is left unset, text is treated as ASCII; if
+it is set to en_US.UTF-8, for example, then text is treated as being
+encoded in UTF-8, which is more complex and therefore slower. Setting
+it to other languages can have varying results.
+
+To ensure consistency between test runs, the Run script now (as of version
+5.1.1) sets $LANG to "en_US.utf8".
+
+This setting is configured with the variable "$language". You
+should not change this if you want to share your results to allow
+comparisons between systems; however, you may want to change it to see
+how different language settings affect performance.
+
+Each test report now includes the language settings in use. The reported
+language is what is set in $LANG, and is not necessarily supported by the
+system; but we also report the character mapping and collation order which
+are actually in use (as reported by "locale").
+
+
+============================================================================
+
+Interpreting the Results
+========================
+
+Interpreting the results of these tests is tricky, and totally depends on
+what you're trying to measure.
+
+For example, are you trying to measure how fast your CPU is? Or how good
+your compiler is? Because these tests are all recompiled using your host
+system's compiler, the performance of the compiler will inevitably impact
+the performance of the tests. Is this a problem? If you're choosing a
+system, you probably care about its overall speed, which may well depend
+on how good its compiler is; so including that in the test results may be
+the right answer. But you may want to ensure that the right compiler is
+used to build the tests.
+
+On the other hand, with the vast majority of Unix systems being x86 / PC
+compatibles, running Linux and the GNU C compiler, the results will tend
+to be more dependent on the hardware; but the versions of the compiler and
+OS can make a big difference. (I measured a 50% gain between SUSE 10.1
+and OpenSUSE 10.2 on the same machine.) So you may want to make sure that
+all your test systems are running the same version of the OS; or at least
+publish the OS and compiler versions with your results. Then again, it may
+be compiler performance that you're interested in.
+
+The C test is very dubious -- it tests the speed of compilation. If you're
+running the exact same compiler on each system, OK; but otherwise, the
+results should probably be discarded. A slower compilation doesn't say
+anything about the speed of your system, since the compiler may simply be
+spending more time to super-optimise the code, which would actually make it
+faster.
+
+This will be particularly true on architectures like IA-64 (Itanium etc.)
+where the compiler spends huge amounts of effort scheduling instructions
+to run in parallel, with a resultant significant gain in execution speed.
+
+Some tests are even more dubious in terms of host-dependency -- for example,
+the "dc" test uses the host's version of dc (a calculator program). The
+version of this which is available can make a huge difference to the score,
+which is why it's not in the index group. Read through the release notes
+for more on these kinds of issues.
+
+Another age-old issue is that of the benchmarks being too trivial to be
+meaningful. With compilers getting ever smarter, and performing more
+wide-ranging flow path analyses, the danger of parts of the benchmarks
+simply being optimised out of existence is always present.
+
+All in all, the "index" and "gindex" tests (see above) are designed to
+give a reasonable measure of overall system performance; but the results
+of any test run should always be used with care.
+
diff --git a/testcases/feature-test/oeaware/UnixBench/WRITING_TESTS b/testcases/feature-test/oeaware/UnixBench/WRITING_TESTS
new file mode 100644
index 000000000..28cd968dd
--- /dev/null
+++ b/testcases/feature-test/oeaware/UnixBench/WRITING_TESTS
@@ -0,0 +1,133 @@
+Writing a Test
+==============
+
+Writing a test program is pretty easy. Basically, a test is configured via
+a monster array in the Run script, which specifies (among other things) the
+program to execute and the parameters to pass it.
+
+The test itself is simply a program which is given the optional parameters
+on the command line, and produces logging data on stdout and its results on
+stderr.
+
+
+============================================================================
+
+Test Configuration
+==================
+
+In Run, all tests are named in the "$testList" array. This names the
+individual tests, and also sets up aliases for groups of tests, eg. "index".
+
+The test specifications are in the "$testParams" array. This contains the
+details of each individual test as a hash. The fields in the hash are:
+
+ * "logmsg": the full name to display for this test.
+ * "cat": the category this test belongs to; must be configured
+ in $testCats.
+ * "prog": the name of the program to execute; defaults to the name of
+ the benchmark.
+ * "repeat": number of passes to run; either 'short' (the default),
+ 'long', or 'single'. For 'short' and 'long', the actual numbers of
+ passes are given by $shortIterCount and $longIterCount, which are
+ configured at the top of the script or by the "-i" flag. 'single'
+ means just run one pass; this should be used for test which do their
+ own multi-pass handling internally.
+ * "stdout": non-0 to add the test's stdout to the log file; defaults to 1.
+ Set to 0 for tests that are too wordy.
+ * "stdin": name of a file to send to the program's stdin; default null.
+ * "options": options to be put on the program's command line; default null.
+
+
+============================================================================
+
+Output Format
+=============
+
+The results on stderr take the form of a line header and fields, separated
+by "|" characters. A result line can be one of:
+
+ COUNT|score|timebase|label
+ TIME|seconds
+ ERROR|message
+
+Any other text on stderr is treated as if it were:
+
+ ERROR|text
+
+Any output to stdout is placed in a log file, and can be used for debugging.
+
+COUNT
+-----
+
+The COUNT line is the line used to report a test score.
+
+ * "score" is the result, typically the number of loops performed during
+ the run
+ * "timebase" is the time base used for the final report to the user. A
+ value of 1 reports the score as is; a value of 60, for example, divides
+ the time taken by 60 to get loops per minute. A timebase of zero indicates
+ that the score is already a rate, ie. a count of things per second.
+ * "label" is the label to use for the score; like "lps" (loops per
+ second), etc.
+
+TIME
+----
+
+The TIME line is optionally used to report the time taken. The Run script
+normally measures this, but if your test has significant overhead outside the
+actual test loop, you should use TIME to report the time taken for the actual
+test. The argument is the time in seconds in floating-point.
+
+ERROR
+-----
+
+The argument is an error message; this will abort the benchmarking run and
+display the message.
+
+Any output to stderr which is not a formatted line will be treated as an
+error message, so use of ERROR is optional.
+
+
+============================================================================
+
+Test Examples
+=============
+
+Iteration Count
+---------------
+
+The simplest thing is to count the number of loops executed in a given time;
+see eg. arith.c. The utility functions in timeit.c can be used to implement
+the fixed time interval, which is generally passed in on the command line.
+
+The result is reported simply as the number of iterations completed:
+
+ fprintf(stderr,"COUNT|%lu|1|lps\n", iterations);
+
+The benchmark framework will measure the time taken itself. If the test
+code has significant overhead (eg. a "pump-priming" pass), then you should
+explicitly report the time taken for the test by adding a line like this:
+
+ fprintf(stderr, "TIME|%.1f\n", seconds);
+
+If you want results reported as loops per minute, then set timebase to 60:
+
+ fprintf(stderr,"COUNT|%lu|60|lpm\n", iterations);
+
+Note that this only affects the final report; all times passed to or
+from the test are still in seconds.
+
+Rate
+----
+
+The other technique is to calculate the rate (things per second) in the test,
+and report that directly. To do this, just set timebase to 0:
+
+ fprintf(stderr, "COUNT|%ld|0|KBps\n", kbytes_per_sec);
+
+Again, you can use TIME to explicitly report the time taken:
+
+ fprintf(stderr, "TIME|%.1f\n", end - start);
+
+but this isn't so important since you've already calculated the rate.
+
diff --git a/testcases/feature-test/oeaware/UnixBench/pgms/arithoh b/testcases/feature-test/oeaware/UnixBench/pgms/arithoh
new file mode 100755
index 0000000000000000000000000000000000000000..7bed0da8cfc3906be9617eee974fce094022c42c
GIT binary patch
literal 72872
zcmeI1eQX@palq&HNPSVFNlCF}OOZ|=M(NadBrRH|6c=6|DUkxGkI+=%1a4P*cgy2d
z_rdNhEi!f+y3HRB;shjY)N0`x>W5R;aFG^p69Wkn+6mAAH44G0Q?+niT1Wz+El{RP
zTCx>&oq7A_xVLwwrcL|bJZX2{%$u3__P1}}-dld=#OO&Q5&r
z`s$Z2U0?lLgvL!{pnT**h5R?F8US^>6%T4x*X4JtMnm>v|N8m8D4Lc*Y}wwRmGU>T
zPoh26!2h#N>_^ewC{K42`++9*=bPlew~0StXw>gLP3(`Ly%ToBvyj9Z0MAdb^R`<6
z%fkDZw#uIEdRD>C6=7n^%@w@~Y$?x{pR~%kY|+j`*>k;89_&=f^-#$;t_#jg&I1ak
z+kO;=M^7aOtp^iF`l{95hrv2Ee%eBJCz~sKjyryOFkdP<<8~_VU>Dg!si-@*eAkAK
zIBo-HM+E;F>K{u(Sw^s(=kps2zlGB!3S-*#qS6icCDf^0tqdqh6OiT`mumq#KU37U
z6tGv>p`CfsLr(K?z4)9Bw9~!fvPttNyPCwhp(9`q_PaY^53YByfIaxUcLnVH3{e+7
z0lQvORL}hZyIwMt-3-{ok44{Gd+M8iycUIp48RL(Prdc(V$}NHGTN2_mOF0`
zuXbF2bRlN!{~r1B(Gh|9PQzU7FhI2_eFN$T1Ns)!lL5UQ^;AILj{0u|^t)02{eXTi
z>Td`1eW+g#=m$|ZBC7wuC8_F79RL1J@T*I>G#>zS6^-*ScX;6+E0q^xFgJFUWQ)0)
zjLeNiVD8=bQNJ05*9^Qayub4Y%!3Py*k+Bw>fd79Qgp}1{sATC*+~o_c4Z8P_Pj@|
zm`PZE6!Y%G;|G}c!>GA>{SYkt5IL){T-~=2#kLka{tWZJi3i2MTdC|KNqKSX%ka;`
zt0ur3ucJNK7f!-?h#3p`il0A#@=<_)TEuJp5MvJ97+$q8W;q&p4e-8ET-g5rz+WD`
z^_lMuY<=eQ2W|~K{}%dR$JpfZ@M=oM8r4`D&)QSpP;=yF8?53_hWQK0hx7GalzP=7-LR@JK5n>xVypS9U_UBMOhw^ole~9vZ
zly9QkTd7puK{<*NpBs1rWedhVhjK4UynmH(l)RorK0O94Gy5WYw(eK9-58_#L^IOJ``wn8qsixukq`d<
zyY1Md;^t9~qYw8%{nB-vLt7V0IN9FuGNPoH^4Ia0L_5`CpuOXHwsDjA*%4%dQQ+-!`q^(Ej&meQ9k#)%Iw;L(6-$T-NP-wZ2Kq
zpWLpLsCwBjNkFSQ1FS?%RokHTMZ;9JE$R&LCt9{?xm(MRXvrd~D>o|s_zbOI({iVl
z8@1e}<+)Z<)!4q=;_E!_3#t`hvzFZd?TUXri`u`Ub>_OP^)Kmm9sd6&Ry6njX$Pjh
z-#@Di_iJ_?x<5uI+s%2CiS+R!i6gOiJoZp`??c_kdSkuEjvwhc-g^v&2M3SGx`xl3
zzdzR7lQ@#-1t9inULnlkfy0p_b}!87`TKP(8~J=&w_Uq~56|y@ecg@n@SOj#p8I$5
zVc$kR+y}F5eZ4kkN3LcR}qTBzU&!w{Iba%Q`oXBOnZQD&x9_<52yWWId_6j!52a15!2y8ZZ
zK+#yFHM+BPZ=yA_VTTdH%y&-MU7N3avQ%)o^B1SO-Kj!qI+xFMC(oZ69qPuR#MR8|
z=`N*yC9a1P&*zHMGx2P3ngTYF+%M#(j`qdkS^j_a!B{+eOq4x4eK9`emOLlzmE3sQb4x`G`Cl~H<=J9-62nW=
z<@j{*Qm&YZdv+@Cl&k5|dAmHxz2!vR-XB3|EX
z{Wk-@Cu;3mfZr#z_BP;kr`Em|cx|Y)+ci{`l44PCuimPZ}LwhG&
zZfM^Q{C*E6i^2}z_j;{;Cp3ERMByXw9sM4z^}ieV{akC`1tX9Qnp8asyW!b}cK$U|
z8xn8m6e($pV(I$DA&!*7z@^#JY_crHYd-(bF
z^X!7hL-W(O?}p0_?VZrS#pKjV4&(VwoG*m#*Ln0O&TQx35q`hh;YR3pqi^qoE1`bV
z|4qM3p?-_H->`j3+dD$s{C;=Axe$ln)c(A0K~o%rxRiE>c!^VF}0*RU-D^>*LC6Rw5kiEm#Yr%H~eW;{918(U0vRr25Q9Dz;<
z+o`e9e88Vp=wH7+w<}Qf?}Y=(zkc2J=X3pg>S6TXu0~dS8g63GpgmUWsgWHD(B6o%ft!R*SYNmOJpkos?0*RD#52sN
zi~J4bpr2pP^B1b$_4msodi-XSJR_>#ZLp+y@_l(s`-kr%y)lVD=zgEj`7QLP`^)Pg
z=gFx2+9@D@jr@^Y_;jo;arF;Wo^8PEc9(8=yGj1nn%KWacBB41^;Q#q+Wqt<
zN^m#>mW4Y7%d@j^cm|bAu04f2j9igia_Ew_^Lgu%V_&o;l&y$1y^9X*p=viM7+&_K
zCnj)nm2q4rn=5;cYk37Loi7!gGIo_IS=oFkW#_F7Zam7CJv{^IQei6ZcupqK)7ZnB
z$Q5&z?Yj1?aILSYs|0-A-MUX80|og5oDeZo3%X2`O@8v5j!fz!Bq
zuq@n7Stmwx>&Va;Si_@dlLMpH*^?(9J27sJ4kDnf-3m>Qb5C*2oWgTZh*+VzC8cT_tjP2QQ$-d}V
z1$3v2)&3beS1?2EnqoF)oe$p!E
zvPHW#vxO}99uOp+9W{ttX9|Z5cHVXiHRGbY3KLUquINn!8z51hE#P(&C0>k@lUzez
z$DM*i5qG%>++`=!-ZGBReA7Vk6z){BpxP#JiwcR%Y!UPM(sTWmCmpwpo7Nh|!Zt)?
zQ-H2be3Rn=efDmw1-bxBIBf{vzW~;X4lnRaiUahN~K)qN!IXNgu`_N
z$Wbpx^(&!tW8b9mPs8
ze#S6W(~ORfDQ*1}jvvCNf5UI8-2Y|6RL++}{kJMB|9;a7iZgh$;lc5XhKU;BcLF7#
zUtjPBRH@4Eo!x#j{36OI9vsi#tyjp1g}+n#nxBK~B!3wX`bNz0{Jr5z`a=2pIgg+H
zSYE_<`sU0!f0tO)@jcWDmhkv}E5N6*LarRo-!GPR`~~gE{fDprvpPPh{rEe_IUR5<
z5Pvt;!}0WOlDN|NsPOlYE^T1WeycxLu9N&69>I8XgGpxmexd~YsV}N$_5HZ20la|q
zU_AfsP?HkdZct;On1MP~e+Q3XJb#C|p)Zv8uN1H#zr@6_Em%07
z%Kaf^(B5}tqeU_9pvxWT;o^FjT%!oFQi;um%N
zP%tUh!|{Dh;#=@94OAXb=k-%KKG`I`N5|*aLscyte@4fL`M;#&UkU}V%JP#sKGY5L
zxA-2hPy@%Yne^(~!$y>k>-Y|OIbu18$2cB5e|TQeGQo2XtJN}jm*3N0hmwc;@5G2k
P@sDixJ+q;JkP80=H-%tM
literal 0
HcmV?d00001
diff --git a/testcases/feature-test/oeaware/UnixBench/pgms/context1 b/testcases/feature-test/oeaware/UnixBench/pgms/context1
new file mode 100755
index 0000000000000000000000000000000000000000..9e2750baed4f099fa69cd9fdd2ebc6385f3f85d6
GIT binary patch
literal 73200
zcmeI1e~c9Ob->^59zPGxcR&m_7zRud?D+OLI51$hp1T7M62J~#T1T=v%K`jEieyESPOT;Z1ifull@FxVtG}Ev7C#(bR_VkxGizgNSY+7a=Fp~SaRzmXX&~?
zm|6>@aZ8}G1a&pRFZH!N6V#VydR2ktySg7&wd6QbDekI{yQ@p3@y;$=65MMrTUr
zOT|spNHialdY_85cwM
zpC#XiuBpPxp9W-vM~w`0mf2Z`t|O3qSeS
zj*nVCcyatl%B6l&A1ELB&_VulR0)8ZU5qEytLq9HR=pwn*8hBF1&XFe0c#dDs9n&+
z{zw!1NwgbyMvq=JG|IoHiT%DN_8+6&z%zOrMnj|gJDb>hn%Kvh*a>H&{GV%L{~NR$
zct#Hk4UO{9J!@oN*~I>3w0FQVn1Vhm0q{Hm^OLXKim3EDHx++A8|C=Ubz;n}eZ(
z=jQw&tSKHX9=3|^aL`(f9m(7Sh>#(Sd?naKIkSdp->1y+c@;aXb$S_>=cA5M?fJ
zW%Fs&9)EW)x%`mWMcVEA?A-S=qTI{|Ttlg=-7G`sX-JI6KkZ7(W4
z4yC{y&Q)ln;yl6$$7==Y=R@`?YqXa`cD2@44RHyXWsILj>^HJ{$
z>20VFhV(_K9}nrbqW-;*emm-~hxC=GUkvGMP>;uMwEsEO*M#)DQSS@sNz?~J`UcdG
zhxE;;e=nqOL;dxT{+p;@4D>4xzxspgtuVc6E_~IDK`Mbf;)#j3R>Q%LbND2ezK_7tFKI_a#cCm+v*tQaz5ZCFPCg*$ZEX((9<&V+-BKCt~$}^SJ
zpQDbW{p_moJG>ai(6xL`*f#`d2L^GT;EfA6zzYH^|;tf67S=WnbWJwn@WK9>6=(*;x&8;)88M9QMWNDfMFcN9fCQg(MiK_QUq&@6a4$`yDtAR^s^q^1hHTr!U?Kr4JKo
z37&*=ouvfUwcvRd=G%@Z#SQAbl&6gI6MjB0ZQ^5J58A`I={j+JFX1El5qkx2?y7ko@m~$_w`;CF`F8JuuRXK+TJLkOq5nmU?YlfMJ*Z;i
z)mZA+%oBg1){-kYCl+I_gKPS?P5^A;@qchlV(XPwb9&~9*VQ`nvk0F!wU1|v=0bTx
zsc*!bzJAN9t8{->E4CB+-@hI(p!Tld`h@f4ZkjI(v7YA36>AbwQ@w!I4BOUz@
z9@v*U_Sx*%vF>BpLb1AZA1K2bhF<3ok-7(=$3buua)
zjM}t?8*1YZS7~d6h0|&nJ~i55hh+Q>mm-ehXy*?Qf!d3gypH(lhLe>y^rBl#inP
zE0o_tc?so7l-E$ciIQIN+ObSW?D753GQKjle8DPZTZXnJnCpJ3?47fcp6~C$(?lC?
z@zsOkx8u1B?d!FbY}DTj$79*RGmGaW`}MXN-)VU%(f3?@*L(l`w%RHwZW7Ovp*E=9
zKf?0_+SZ|joo($e;gDE?#aHn>jdm(SZ(I9w@t0yd-V1V(?Oil&WMwMr$tucJv0As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+gn$qb0zyCt2mv7=1cZPP
z5CTF#2nYcoAOwVf5D)@FKnMr{As_^VfDjM@LO=)z0U;m+guwp`0_Fl!UFZ7NK(=c=
zrmy~(mJ76;niKdf)cQ&7f1B3JGrg*`TbG7{C^-7+KC*mdvn5BLJ
zPPLe-G+XOaaZ{D%t6zW%TDEGrOv}$`$s(Z(mnuR3;#!~37rI32bF^Hlr8(PFCAODm
z1v>ZppsEF!rzN+4y%Jo{Y3*Opx^50GYyG&c*Wv$b;;QEUX&Wf(`~80al-J%SJv=>ADj)@tM-B&S}{w56_pE^}M;65BoOq;Wp0S
zq%Cgav~KGjy!K$+Q$_%u^E|heG4x|%xkc%7W6z>gex1rt`xBo)J!ZtB31cTA))D(`
z+v3(l+w!&pOFLriIIrSKNVIKhy{~m^>*sIlh%K69E_<<$){hZ96VQ*pEwNA97D5aU
zRPc-?lU>38tUEZIPIuwmA99DgY}-qZZ0-R_d;XAJ^ha%fB$=iNm=%M0aqdvUh|NkY
znYAK0D>i#^JcgNXp0KMnU)M-})al9|Ds*{;(ZMk{o9XI1uzSz;F6>ID8d~eS@`I0c
z>h5%A-Q3uC=WuR}I~cMA7XU@ax2VS!bs2Ds8vWh(>S1bhj%Hoo&pMq>F5}uc)a^m{
zH?oDzJx1p+|9^0U(HT95ioTsb)LHQIzLWOzUT4ww@;MCoA5_@Iqq+15hUdqMonyHp
zZZ6a5+k;uBSWTDC+QkuW&CO#+o42E|xw&CPL3a>cb9wAuO?T1w(C+@~xV&G_HN2aj
z>PX-K*b+DEViGv|Qw{A27;k8AsqRPNWC@rBQw{xRSNE%0|2fcToloFu#QQ_7|2*J*
zrq(_mct5JOw*s#{we|(T>vpXjckg*-*rdt{Xainz!j%cQ1$YgqwYS5Wg=W~K$_eOz
z_O?d$MZnJo;h+R827WH6wJ(82`&I%z18?Ykyw?9#;QhVUz7!^)FKklf1T2H8hIW3M
zsSQcM?Qp)KKR@k6{rM>;YQLk_ptS_7Y+_&4#BS8tY3XZ$UsR$M!HQo$&gr=+hN~`*
ze?tcb&rb`hdn4xyo^KX{ADNp${tj3hx$l9!e*AF$i6(irMDnb~4m7$h&cpjN_wx%_
zh{Hl)J3rqA{`K=Eu-A|OEjMkq6?h-!b~z94y=;G=Ngn3&W4%YSKVR2FP3+P68{}CE
zlacWm*q6b%hV~9P9pU`NCjI38@Q%v;d;|-`ne7X7MbPddxD|`
zH@m;5BID$%=uhoN?L}=r7x8C4zZc=~wD#xs9#qwF5`Sa3KD?p*X-{_}Gjz7(Ka-J0t{vWG$J0NPO
ze2wnQTzV>l`t`X(fvVpZ)++z{bvqbW_50K|^j}1cM~*Kxu@5(~A4R(n$wBH>Wv|~C
z{t)f$c;8=*jFa!9y-|Pu6zv_MeI(!u@R80F-7o&SNuGC<|I)g4-&Ot#VM4D1yrcY#
z?D6{b>fcnJWp(@1uXO(CKA6B4f)2Q`Zrr0;%}0Bq>s^j^L;HlqsV4q+Q~vl;h|arv
zoA~b{|IqVB@cset+fDpkwA1~Ha?X)|9J%Rrfb(1pqt7iLqTNtEi2UG(59%lHBkOea
z9A%IAqIzc&dk@*;_50?wCjRtB)}745+6-70zP(t!Jq&BdQ90t-1$^sqbL8TpOWMw6
zts{{RQ1r)!hVY#)<9N=nTl5{z@<*+7HlK5f*jgrU4QKO%
zcGk+^`%uxc$Hp=FXd&zPPA0j&v4=I}=3LA6Jo~8S
zuW_}ko%?(D?yz?3+iqE4ZU4%?-o5xzV_Eo;XYJUfYj;s-1W_YJ-hq*tqsZUdod8-n9>%$DtYv_=;o@G8+)MOcwXMCTF8BOYOi$z
zMrvSj87Ih{I3U0v(M;3hytI0K#_YQ?UEGJ|8HXN}JIo2qqBTPCezNymH
zTI3M401x-~KkQ}zZ$+`D9m^UX&F8A!z?fp*8nJU3?4doF_iC?x_k-w}adXyK(a8Y5
z&-(dnZJ%;D#4=gSbF%rg?K_m%chgqEr3>bMRWBS|93n7u#6ua*<2iOFtXo#`Xz{RB
zbcb_xZG#pv<-1F;dEC{Y_M8G<33b5pa(PR24d0N%E!bJx8x0%OSc0K~=jQyOa0Mib
zM@RA98zo-9k|SI~U&kvzGKa7HN&MuHR4>|{2tF|N_U*>k_F+(UBlx}!$;{Cl<_o0n
z1vQ5quZaDtQ7o)Osx}4aQX%U@l7>7a>FXOL)euN}c{MPS&WN5HBN>ckEr2u*x$q~c8VHi%ErTS^)lr;+LBG_cC2imL+`%y)=XWyW42}(BHajp6_{_Za;ec
zPwDtR?Ptz5Rmr3V@tDvTO!2qk8I6AyZRDcetKWsDXi(t6oC7WBE0;-r6VGrwe?Kbg
zcq0Ow|GvXHo{z-y^AXGoJWkTh4IO5vOvP{G8E&7yN1fC!mi!*iVR~4w3a4~D^XKnY
zr*(WH(tqa130{uG^Y^VY_^3z6qk*XYM%(`p)=(4NK7R*mZwo@&e@**S=ti!LiNBYX
zXLNiZs=wTSmTzLsEqHP~e^ex&RmY#x{paz;aXbz#U@WC)ASTqwfG3r_PSt(4eQ9isbj2bK2utByV*5-G=d;Gwk}N
z`cq1MzoNeDn#5nz@!P{mu^f%>X%cVZUtQ?n_e-VVPc*)-NqknvXE{*IX#74MALV~u
z$DfP@u*&i)IzG}2_$`5l5vkxfHj~bCpKT;h==f8#IpVPf&lH|Ket2BbGRb2Pi`8R`
dUO4tfDx>XpU_``4<@m~g{gpY^{{{=8a*}?z-
literal 0
HcmV?d00001
diff --git a/testcases/feature-test/oeaware/UnixBench/pgms/dhry2 b/testcases/feature-test/oeaware/UnixBench/pgms/dhry2
new file mode 100755
index 0000000000000000000000000000000000000000..ef9c8b42e513607b4bb1195b407be3b280e7094f
GIT binary patch
literal 73808
zcmeHNe{fXSbw2N{ga9ES0R|a}wSa60u~rB$ARLe1N&-Q{4@HR7N!&cGc30Ad)vnlG
z*&?QmN}Q$*X+{bpCO_(8CrvHSv_o5xSkp`*r=AWoab~DvXPi#z3KDxro3=^`juacV
z-+k{M>FH_3B$;-mf1GFDo_oK0&bjA)_uTv5yYff(9N23b2JvOmH%Qe67g-ME{t~sW
zn$fXn7cC--YG?y3fL0N1*0Xep<)k)axe&VQ3gA|DyG&J6(o$ktu5oHaEV*|ji*=hT
zEM0SDU!_}RNzQ28Ulr^4O}BmgX4omPd{)OXvDs~BI|VDdNo_Z&?O0ycfwAQAsj;cM
zUHfe}E!0%qQi7VD-x;ZN)z$iXE~d&u_nsRhW|dj7GmD(y=Ylh
zpjFXY2!8~4uz6DJac2x^1VjL!N$iT0<-T%0x`g+t&W2XOmP+#xXuOIpP
z$O^=G)+`ym@j+wc4wV1ylsWv%s5$b>fbr~PfHqUf;7hkQ($p*Y{1a~m=vh^Nq@1Sy
z33a2DhB2Q*kB{#hMxG5t$94@D7Z0}WrLmWaD`>RQ7;O9aLThSe6^#^I=Hwr(wMOn)
zOCxb$9Sdmc@xJh6O`kFO`u#N3g|?|wwmjAu(GPkm^?Xmu$a4;s{t
zWE(=h4P6DN>pFG~(>`nJyNDx5gKd8SISy_O8hmLrbapKqZ2Le{!|*qNf4hx<)6hC=
z^RODj7~=Yx(hscMtlBZvLp0VfybiWcL3icD*2tZR@eag-@vZ1Ue^>*<9VSh!M_*0w
zJ@p=%x)Lyl$3JEa51gWg&US6_`9
z!x5A%+)?QN-ZsmPd|tU@U`5K5aErlX3i~Z{^z~==Xly
zcluc+K->>c3jF5It!*DC>Q
z>gom>ego^o;q>x7$cw?3ig1U)^8wt-u2cI|e#WtW=w~fc7STluxYxIuZ?eo`yy9W*
z%;5)#{(TIuAy;6t?pn)K1U3`MQ^LAac7FYJMBiQi_D8=TUh;>h*S#G+^E~vg!nSdu
zWvbJ$HK%P=TsI&4mb1pMm(tX7^x4tb*f2*izMan}D&JlrUI
z@I0hqFjTG$?i_AxwWe+@t)5hQy3Wb7!M3+)1c8V~weNtVH{GVEN|j*jtuq
zzt@j^YvdQ;^~>n{#$)dK{2JEh8O)Ido2QW@Pc5@Xn^#yPv6VE^ydn%~KsG{}Bh6LT
zRBSa(eGK#YIBa6b-zdtBWKDIVtn3>t8n#uQEw)C_8?=28dN&>mkC;oWkyGUa8HO|<
z8zIdRg3m$tyne*xV6fa8eID^e;5V|BhIhi(m*MwnVEMBhh%JmbmA@$ZY$`G)m0c6o
zlp4P?uY>0B`a!F~E;5c=m|HcECRw8!DqvefmFp<*9`HG;@M--bYw8r{PmRMQtVz_>
zIM%^N@$xS1-^mB~4`a;)(1%%LjeZpU80V?s3fAK~GB+DFu5T3&>#u{pF-Q&7o;KB9
zy%775N$qNlu18GOMRa@|G4IFvtFEBq5tJ>o1$DfRzYAESL&otx&F9;Zzs@*RE=}t!
z#H$P$rIv2nvw|uY+&S3Rc=8X<{N+jO%)(1|pZ@B}d%pMrnWqB-8=tG8qSJw58ZACS
zqw8qFX^i1?06BLGWB)4re!L3vTSS$oR+#Ukf$rzJF3@Q3;!>wy6MF+e$EJ11(4WJ#
z;#9@6o&pVuW0kMPsAJ8xnG3Ck4NEXL6gW1HF~*3ts~q10-QPnVW3O$i#{TwK@TJD8
z_IvE1Z6^904;Y(om4qji&({Oy<{IGtsqz{ej)QxJD^B^j%Fk57rQUbc)pYpiq4tw^
zCu1k;PbT}a)2}AY``X@rr2X)rJ;Bzt!!2#$gZBoH96q=wc%;2;|DhJ5quEGze0T8f
zY%tcJiR2RLlxteRu72Z@_G!BU?b|lCHQ%m%>&8Pf?Z%qG{p#E14%`7bKh(K@_py4QTZCe{S{9is^72RJ`ZdYB<(G)coMOzkEn0reh#?vK@~2qIAcC(
z?0L=grDFY&Zda1f-5=PH|?s5rN@`N`4`e4*^~<&9rl;_~)x=>RLm2XV3Uz+MHW+SR4xMPM%jd)Pr->=LlG$dek4DNcR_>|S6U3Ijh+
znnxBpqefNn`Zn5=Xvc7-FXev)#$SOP_TtkDQV<4&0bxKG5C((+VL%uV2801&Ko}4P
zgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&
zKo}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV
z2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+
zVL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG
z5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@8
z0bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd
z7!U@80bxKG5C((+VL%uV2801&Ko}4PgaKhd7!U@80bxKG5C((+VL%uV2801&Ko}4P
zgaKhd7!U@80bxKG5C((+VL%uV2L2y1Fu2%qqUu}Z%2r)Bbgb{wa*38b3tYW2T^}oR
z>v!t<_|33WTBGX~TCUb|LbtEg^@UpAxRp<<;yKe&f{LAAqKYERDJ{_TG1GENi=AJh
zzt^%<%PKAJ(2_+!7p~>q_)J}&&~k;AC0ef1a-i69N{m+6i^%iN#{ol&FUM4Ze#_r~@(k9~d%j?tu
zJwF!8MNWO8@o7j$uhs!(|CocQ8$rYG(D)J=mB!uW%S!|0Ys!zVsx&IFM$8Ze%6FIE
zU)oT*@y<$PS&3EkM5EdZdTa;dLy}>P*hn^u|bPW4k!>biUTW(*M^FX1Kc#kKhRumT+ESP9{BW%fr#6{L^$*l`r}{ZS58@7hvhkd)
zBDPh?gbWQf)$i0v8m#S2CUUuCyf&VSB_b)*Bb|u{l6^an<=y=6@YZ0h|L)4>BGF^D
zeVKGF9?hjQwb@)Ior1}4(Gba=NJV>Kp6<`q_NN|Aq++$XNM|yhopu*ZMzTHJTOy4>
z=kLd%Es^R56>0{dno1+QS>Xa~-QP5wmnZdF!)*TAR{)d0%e3a`2ax&g1@Qn46vT_B
zpDSKx0X${K3hFPIe(ucHFQG#FbpSgN@AI?u7ZE=%X5)*ApDVNRQsRAQHok;-Zh6~y_iWVT6wR@3E8h5Ybai(jALR{Z!o
zW&^qwpu6V5tLMRkb8xlo717W0{swoqpPT2UP&BaPa{dRkq5B?GHvL>=f9`uq8C~?P
zOV@uTJ?xuz7oVFy?0;+?KYczwH3*>4xY!Rrn>o&Rp)aCk_yI*}3ZAO88?0dkKfBs%lKGfy<
znY$nU5N!s{#a;bM`eWaGx%f<+j&uA>;=Uc9eM>A>btL=0TrnG;Z(dkG>6=IOwq{`e
z_v5OQLh&qh^ylvLRSw~EpD*un^ylvXZhp>v-i4uGiFv$WxcjynH_-v$%bbzV{(0EJ
z%P6S%xmPzwH9l5qImi}`e|R20pV0cubGz36H1MDtN6!s{1qEr
ztFHmC0RBl|KL3N(zvRo??`fQ$cg(}v+K>Oa_OA}UYL5RmfG=j3pzBzY4=>T3R&f|{%ndczKIq2Y3^qepL#~nY|-!z{;
z(w(2yc&pA&_jye88I4cq{N%j-yvAS9`h2c(-od>ia?5`UT=8?x7tf`6^#57wKcn@T
z&mU|2g65gW`!nDwZ~gi9bLHPGBYq!X{T&Kd`={Rj->pMlgn2CFr&8nob)nv%lplU>
zZh~KRy+h~23%)ov0$2K5be`9^>J-xWMUC_G?mq43ypM+mHU6A0-wta0vc@lI{|`C-
zb;To&D}Ce}uU~HZeeGvV`@ynuu1^3D>atfn<@hfney`Z18=rA-uclibRk%6#ed*it
z=)a`>zpV4^VRnhH>H1KbYGP#D_>#4Ak#4FPK;_X)qz_-fiIn<)Pe3IaNha+_3{m0^&cy}V3i)ZXyuN_UMQ}HbNily!DWV$nwv}5=(
z&f1aw0g9%3`;zfoJQmtgSi|m0q!M-{lZl+LUC%-`jfvCZ3uN&D<SQ62
znpWMWb#{=wzx|*+!%_ReCdE*@`m(2RCSz~4TR;ye=2HkrAH&j;x+=i&XJ(ON?>`L97&z{!yy#4*H3
zF0Mk(C8Bm;0-PX+6cZk9sWF{Sx~2|+^)8Rw=x7!lJH8La2Xc06F5_ycDXQlw%AUwR
zVrLWGsmQFbY#Xzxvkf!H>T2S7Y0R4TOuP>aQY0D4^m>Qqq%?K)WfG}em$$*mI(#05
zvL||RCIks;9NEJqn8Y)E6iVSJDuko3kaGZ3i>Y*>aN~X)Cv}t4)`N2-3dK&O;Lnx0
zjN9@^Jd?%A(yWS&HqaYU2D;Rj%uz@!KnmqR7lqWiMO)fQ%}~5YuhgCxY`NxIxJ%Zx
z;4&OXdJ|Ffnuag@gq#jsXIK{$!p_l)4Z)4e8Cock%ViRs{W2nsqUF-
zBGr|iZtCpJ#2?`r4h-X527z4E=Ddymwo;u?sAq$I>`kAK?b3k%1cRR=+}{Qi)Za4L
zp3l+RHNekD*5~Ip*LV2r2TaRp8qoGZr#AEBw{J$%oA}!Y_kYf`9OZMq{`v1e{2t;c
z;pY(UQrv7mW?HBb&hZ>Ut$M%