From fa145d051e17037a097ad2df0b2f018cb3a4f9cc Mon Sep 17 00:00:00 2001
From: "lux.lu"
Date: Thu, 25 Aug 2016 14:21:38 +0800
Subject: [PATCH 01/54] Set the maximum number of simultaneous remote commands
for pdsh
Signed-off-by: Li Ning
---
conf/common.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/conf/common.py b/conf/common.py
index 0db6217..72c9ea0 100644
--- a/conf/common.py
+++ b/conf/common.py
@@ -145,7 +145,7 @@ def pdsh(user, nodes, command, option="error_check", except_returncode=0, nodie=
for node in nodes:
_nodes.append("%s@%s" % (user, node))
_nodes = ",".join(_nodes)
- args = ['pdsh', '-R', 'exec', '-w', _nodes, 'ssh', '%h', '-oConnectTimeout=15', command]
+ args = ['pdsh', '-R', 'exec', '-w', _nodes, '-f', str(len(nodes)), 'ssh', '%h', '-oConnectTimeout=15', command]
# args = ['pdsh', '-w', _nodes, command]
printout("CONSOLE", args, screen=False)
From 1a4b42379f07726b6b071c7da732ec27c5662961 Mon Sep 17 00:00:00 2001
From: "lux.lu"
Date: Thu, 25 Aug 2016 13:59:55 +0800
Subject: [PATCH 02/54] Enable Vdbench support in CeTune
Signed-off-by: Li Ning
---
analyzer/analyzer.py | 26 ++-
benchmarking/mod/bblock/__init__.py | 2 +-
benchmarking/mod/bblock/vdbench.py | 301 ++++++++++++++++++++++++++++
benchmarking/run_cases.py | 2 +
4 files changed, 329 insertions(+), 2 deletions(-)
create mode 100644 benchmarking/mod/bblock/vdbench.py
diff --git a/analyzer/analyzer.py b/analyzer/analyzer.py
index 2ddffad..5c290ff 100644
--- a/analyzer/analyzer.py
+++ b/analyzer/analyzer.py
@@ -196,7 +196,7 @@ def get_execute_time(self):
def summary_result(self, data):
# generate summary
- benchmark_tool = ["fio", "cosbench"]
+ benchmark_tool = ["fio", "cosbench", "vdbench"]
data["summary"]["run_id"] = {}
res = re.search('^(\d+)-(\w+)-(\w+)-(\w+)-(\w+)-(\w+)-(\w+)-(\d+)-(\d+)-(\w+)$',data["session_name"])
if not res:
@@ -323,6 +323,8 @@ def _process_data(self, node_name):
workload_result.update(self.process_cosbench_data("%s/%s/%s" %(dest_dir, node_name, dir_name), dir_name))
if '_sar.txt' in dir_name:
result.update(self.process_sar_data("%s/%s/%s" % (dest_dir, node_name, dir_name)))
+ if 'totals.html' in dir_name:
+ workload_result.update(self.process_vdbench_data("%s/%s/%s" % (dest_dir, node_name, dir_name), "%s_%s" % (node_name, dir_name)))
if '_fio.txt' in dir_name:
workload_result.update(self.process_fio_data("%s/%s/%s" % (dest_dir, node_name, dir_name), dir_name))
if '_fio_iops.1.log' in dir_name or '_fio_bw.1.log' in dir_name or '_fio_lat.1.log' in dir_name:
@@ -598,6 +600,28 @@ def process_iostat_data(self, node, path):
result[output]["disk_num"] = disk_num
return result
+ def process_vdbench_data(self, path, dirname):
+ result = {}
+ vdbench_data = {}
+ runtime = int(common.bash("grep -o 'elapsed=[0-9]\+' "+path+" | cut -d = -f 2"))
+ stdout, stderr = common.bash("grep 'avg_2-' "+path, True)
+ vdbench_data = stdout.split()
+ output_vdbench_data = OrderedDict()
+ output_vdbench_data['read_lat'] = vdbench_data[8]
+ output_vdbench_data["read_iops"] = vdbench_data[7]
+ output_vdbench_data["read_bw"] = vdbench_data[11]
+ output_vdbench_data['read_runtime'] = runtime
+ output_vdbench_data['write_lat'] = vdbench_data[10]
+ output_vdbench_data["write_iops"] = vdbench_data[9]
+ output_vdbench_data["write_bw"] = vdbench_data[12]
+ output_vdbench_data['write_runtime'] = runtime
+ output_vdbench_data['lat_unit'] = 'msec'
+ output_vdbench_data['runtime_unit'] = 'sec'
+ output_vdbench_data['bw_unit'] = 'MB/s'
+ result[dirname] = {}
+ result[dirname]["vdbench"] = output_vdbench_data
+ return result
+
def process_fio_data(self, path, dirname):
result = {}
stdout, stderr = common.bash("grep \" *io=.*bw=.*iops=.*runt=.*\|^ *lat.*min=.*max=.*avg=.*stdev=.*\" "+path, True)
diff --git a/benchmarking/mod/bblock/__init__.py b/benchmarking/mod/bblock/__init__.py
index affda02..07dd2f1 100644
--- a/benchmarking/mod/bblock/__init__.py
+++ b/benchmarking/mod/bblock/__init__.py
@@ -1,3 +1,3 @@
-__all__ = ['qemurbd', 'fiorbd']
+__all__ = ['qemurbd', 'fiorbd', 'vdbench']
diff --git a/benchmarking/mod/bblock/vdbench.py b/benchmarking/mod/bblock/vdbench.py
new file mode 100644
index 0000000..a0ced5c
--- /dev/null
+++ b/benchmarking/mod/bblock/vdbench.py
@@ -0,0 +1,301 @@
+from ..benchmark import *
+from collections import OrderedDict
+import itertools
+
+class VdBench(Benchmark):
+ def __init__(self):
+ self.bench_type = "vdbench"
+ super(self.__class__, self).__init__()
+ self.cluster["bench_dir"] = "%s/%s/" % (self.all_conf_data.get("tmp_dir"), self.bench_type)
+ # Format default output dir: vdbench/output/
+ self.cluster["format_output_dir"] = "%s/output/" % (self.cluster["bench_dir"])
+ # Run results dir: vdbench/results/
+ self.cluster["result_dir"] = "%s/results/" % (self.cluster["bench_dir"])
+ common.printout("LOG","bench dir: %s, format output dir: %s, result dir: %s" % (self.cluster["bench_dir"], self.cluster["format_output_dir"], self.cluster["result_dir"]))
+
+ def load_parameter(self):
+ super(self.__class__, self).load_parameter()
+ self.custom_script = self.all_conf_data.get("custom_script", True )
+
+ self.cluster["vclient"] = self.all_conf_data.get_list("list_vclient")
+ disk_num_per_client = self.cluster["disk_num_per_client"]
+ self.disk_num_per_client = disk_num_per_client
+ self.volume_size = self.all_conf_data.get("volume_size")
+ self.instance_list = self.cluster["vclient"]
+ self.testjob_distribution(disk_num_per_client, self.instance_list)
+
+ def cal_run_job_distribution(self):
+ number = int(self.benchmark["instance_number"])
+ client_total = len(self.cluster["client"])
+ # Assume number is always 50 here
+ self.benchmark["distribution"] = {}
+ client_num = 0
+ for client in self.cluster["testjob_distribution"]:
+ vclient_total = int(self.disk_num_per_client[client_num])
+ self.benchmark["distribution"][client] = copy.deepcopy(self.cluster["testjob_distribution"][client][:vclient_total])
+ client_num += 1
+ nodes = []
+ for client in self.benchmark["distribution"]:
+ nodes.extend(self.benchmark["distribution"][client])
+ self.cluster["nodes_distribution"] = nodes
+
+ def prepare_result_dir(self):
+ #1. prepare result dir
+ self.get_runid()
+ vdisk = self.benchmark["vdisk"].split('/')[-1]
+ self.benchmark["section_name"] = "%s-%s-%s-qd%s-%s-%s-%s-%s" % (self.bench_type, self.benchmark["iopattern"], self.benchmark["block_size"], self.benchmark["qd"], self.benchmark["volume_size"],self.benchmark["rampup"], self.benchmark["runtime"], vdisk)
+ self.benchmark["dirname"] = "%s-%s-%s" % (str(self.runid), str(self.benchmark["instance_number"]), self.benchmark["section_name"])
+ self.cluster["dest_dir"] = "/%s/%s" % (self.cluster["dest_dir"], self.benchmark["dirname"])
+
+ res = common.pdsh(self.cluster["user"],["%s"%(self.cluster["head"])],"test -d %s" % (self.cluster["dest_dir"]), option = "check_return")
+ if not res[1]:
+ common.printout("ERROR","Output DIR %s exists" % (self.cluster["dest_dir"]))
+ sys.exit()
+
+ common.pdsh(self.cluster["user"] ,["%s" % (self.cluster["head"])], "mkdir -p %s" % (self.cluster["dest_dir"]))
+
+ def cleanup(self):
+ super(self.__class__, self).cleanup()
+ #1. clean the tmp res dir
+ user = self.cluster["user"]
+ nodes = self.cluster["nodes_distribution"]
+ common.pdsh(user, nodes, "rm -rf %s/*" % self.cluster["format_output_dir"])
+ common.pdsh(user, nodes, "rm -rf %s/*" % self.cluster["result_dir"])
+
+ def check_run_success(self, check_file, max_time):
+ user = self.cluster["user"]
+ nodes = self.cluster["nodes_distribution"]
+ cur_check = 0
+ sleep_sec = 2
+ max_check = max_time / sleep_sec
+ while cur_check < max_check:
+ common.printout("LOG", "checking... %s" % cur_check)
+ stdout, stderr = common.pdsh(user, nodes, "grep completed %s" % check_file, option="check_return")
+ res = common.format_pdsh_return(stdout)
+ if len(nodes) != len(res.keys()):
+ time.sleep(sleep_sec)
+ else:
+ common.printout("LOG", "checking done")
+ return
+ cur_check += 1
+ common.printout("ERROR","Checking run in %s failed" % check_file)
+ stdout, stderr = common.pdsh(user, nodes, "grep -q completed %s; if [ $? -ne 0 ]; then echo Run is not completed successfully; fi" % check_file, option="check_return")
+ sys.exit()
+
+ def format_run(self):
+ common.printout("LOG", "Start Formatting!")
+ user = self.cluster["user"]
+ nodes = self.cluster["nodes_distribution"]
+ common.pdsh(user, nodes, "cd %s; ./vdbench -f format.cfg -o %s" % (self.cluster["bench_dir"], self.cluster["format_output_dir"]))
+ check_file = "%s/summary.html" % self.cluster["format_output_dir"]
+ self.check_run_success(check_file, 100)
+
+ def prepare_run(self):
+ super(self.__class__, self).prepare_run()
+ user = self.cluster["user"]
+ dest_dir = self.cluster["tmp_dir"]
+ self.cleanup()
+ # format
+ self.format_run()
+
+ def wait_workload_to_stop(self):
+ pass
+
+ def stop_workload(self):
+ pass
+
+ def generate_benchmark_cases(self, testcase):
+ io_pattern = testcase["iopattern"]
+ block_size = testcase["block_size"]
+ queue_depth = testcase["qd"]
+ rbd_volume_size = testcase["volume_size"]
+ warmup_time = testcase["rampup"]
+ runtime = int(testcase["runtime"])
+ disk = testcase["vdisk"]
+
+ custom_params = testcase["custom_parameters"]
+ for str in custom_params.split(','):
+ str2 = str.split('=')
+ if len(str2) != 2:
+ continue
+ if str2[0] == "width":
+ width = int(str2[1]);
+ elif str2[0] == "depth":
+ depth = int(str2[1]);
+ elif str2[0] == "files":
+ files_num = int(str2[1]);
+ elif str2[0] == "threads":
+ threads_num = int(str2[1]);
+ elif str2[0] == "rdpct":
+ read_percentage = int(str2[1]);
+
+ if int(re.findall(r"\d", block_size)[0]) * depth * width > int(re.findall(r"\d", rbd_volume_size)[0]) * 1024 * 1024:
+ common.printout("ERROR","Files total size is too big, bigger than volume size!")
+ raise KeyboardInterrupt
+
+ if io_pattern in ["randread", "randwrite", "randrw"]:
+ fileio = "random"
+ if io_pattern in ["seqread", "seqwrite", "readwrite", "rw"]:
+ fileio = "sequential"
+ if io_pattern == "randread":
+ read_percentage = 100
+ if io_pattern == "randwrite":
+ read_percentage = 0
+
+ format_cfg = []
+ format_cfg.append("fsd=fsd1,anchor=/mnt/,depth=%d,width=%d,files=%d,size=%s" % (depth, width, files_num, block_size))
+ format_cfg.append("fwd=default")
+ format_cfg.append("fwd=fwd1,fsd=fsd1")
+ format_cfg.append("rd=rd0,fwd=fwd1,fwdrate=max,format=only")
+ with open("../conf/format.cfg", "w+") as f:
+ f.write("\n".join(format_cfg)+"\n")
+
+ case_cfg = []
+ case_cfg.append("fsd=fsd1,anchor=/mnt/,depth=%d,width=%d,files=%d,size=%s" % (depth, width, files_num, block_size))
+ case_cfg.append("fwd=default,xfersize=4k,fileio=%s,fileselect=random,threads=%d" % (fileio, threads_num))
+ case_cfg.append("fwd=fwd1,fsd=fsd1,rdpct=%d" % read_percentage)
+ case_cfg.append("rd=rd1,fwd=fwd1,fwdrate=max,format=no,elapsed=%d,interval=1" % runtime)
+ with open("../conf/vdbench_test.cfg", "w+") as f:
+ f.write("\n".join(case_cfg)+"\n")
+ return True
+
+ def parse_benchmark_cases(self, testcase):
+ p = testcase
+ testcase_dict = {
+ "instance_number":p[0], "volume_size":p[1], "iopattern":p[2],
+ "block_size":p[3], "qd":p[4], "rampup":p[5],
+ "runtime":p[6], "vdisk":p[7], "custom_parameters":p[8]
+ }
+ if len(p) == 10:
+ testcase_dict["description"] = p[9]
+ elif len(p) > 10:
+ common.printout("ERROR","Too much columns found for test case ")
+ sys.exit()
+ else:
+ testcase_dict["description"] = ""
+
+ return testcase_dict
+
+ def run(self):
+ super(self.__class__, self).run()
+ user = self.cluster["user"]
+ nodes = self.cluster["nodes_distribution"]
+ waittime = 15
+ common.printout("LOG", "Start Running VdBench!")
+ common.pdsh(user, nodes, "cd %s; ./vdbench -f vdbench_test.cfg -o %s" % (self.cluster["bench_dir"], self.cluster["result_dir"]))
+ check_file = "%s/summary.html" % self.cluster["result_dir"]
+ self.check_run_success(check_file, 100)
+ for wait in range(1, waittime):
+ time.sleep(1)
+
+ def archive(self):
+ super(self.__class__, self).archive()
+ user = self.cluster["user"]
+ nodes = self.cluster["nodes_distribution"]
+ head = self.cluster["head"]
+ dest_dir = self.cluster["dest_dir"]
+ #collect client data
+ for node in nodes:
+ common.bash("mkdir -p %s/raw/%s" % (dest_dir, node))
+ common.rscp(user, node, "%s/raw/%s/" % (dest_dir, node), "%s/*" % self.cluster["result_dir"])
+
+
+ def prepare_images(self):
+ user = self.cluster["user"]
+ dest_dir = self.cluster["tmp_dir"]
+ controller = self.cluster["head"]
+ rbd_count = len(self.instance_list)
+ rbd_size = self.all_conf_data.get("volume_size")
+ if rbd_count and rbd_size:
+ super(self.__class__, self).create_image(rbd_count, rbd_size, 'rbd')
+ else:
+ common.printout("ERROR","need to set rbd_volume_count and volune_size in all.conf")
+
+ #create image xml
+ common.printout("LOG","create rbd volume vm attach xml")
+ common.scp(user, controller, "%s/vm-scripts" % (self.pwd), "/opt/");
+ common.scp(user, controller, "%s/conf" % (self.pwd), "/opt/");
+ common.pdsh(user, [controller], "cd /opt/vm-scripts; echo 3 | bash create-volume.sh create_disk_xml", "check_return")
+ common.rscp(user, controller, "%s/vm-scripts/" % (self.pwd), "/opt/vm-scripts/vdbs/");
+ common.printout("LOG","Distribute vdbs xml")
+ for client in self.cluster["testjob_distribution"]:
+ common.scp(user, client, "../vm-scripts/vdbs", dest_dir)
+
+ #attach to vm
+ self.attach_images(self.cluster["testjob_distribution"])
+
+ #start to init
+ common.printout("LOG","rbd initialization finished")
+
+ def prepare_case(self, user, nodes):
+ stdout, stderr = common.pdsh(user, nodes, "test -d %s" % self.cluster["bench_dir"], option="check_return")
+ if stderr:
+ common.printout("LOG","Distribute vdbench benchmark execution file")
+ for node in nodes:
+ common.scp(user, node, '../conf/%s.tar.gz' % self.bench_type, '%s' % self.cluster["tmp_dir"])
+ common.pdsh(user, nodes, 'cd %s; tar xzf %s.tar.gz' % (self.cluster["tmp_dir"], self.bench_type))
+
+ common.pdsh(user, nodes, "mkdir -p %s" % self.cluster["result_dir"])
+ for node in nodes:
+ common.scp(user, node, "../conf/format.cfg", "%s" % self.cluster["bench_dir"])
+ common.scp(user, node, "../conf/vdbench_test.cfg", "%s" % self.cluster["bench_dir"])
+
+ def prerun_check(self):
+ super(self.__class__, self).prerun_check()
+ #1. check whether the vclients are alive
+ user = self.cluster["user"]
+ vdisk = self.benchmark["vdisk"]
+ nodes = self.cluster["nodes_distribution"]
+ planed_space = str(len(self.instance_list) * int(self.volume_size)) + "MB"
+ common.printout("LOG","Prerun_check: check if rbd volume be intialized")
+ if not self.check_rbd_init_completed(planed_space):
+ common.printout("WARNING","rbd volume initialization has not be done")
+ self.prepare_images()
+
+ common.printout("LOG","Distribution nodes: %s" % nodes)
+ self.prepare_case(user, nodes)
+
+ common.printout("LOG","Prerun_check: check if rbd volume attached")
+ need_to_attach = False
+ stdout, stderr = common.pdsh(user, nodes, "fdisk -l %s" % vdisk, option="check_return")
+ res = common.format_pdsh_return(stdout)
+ if len(nodes) != len(res.keys()):
+ need_to_attach = True
+ if need_to_attach:
+ common.printout("WARNING","vclients are not attached with rbd volume")
+ self.attach_images()
+ common.printout("WARNING","vclients attached rbd volume now")
+ common.printout("LOG","Prerun_check: check if sysstat installed on %s" % nodes)
+ common.pdsh(user, nodes, "mpstat")
+ common.pdsh(user, nodes, "killall -9 java", option = "check_return")
+
+ def attach_images(self, to_attach_dict = None):
+ user = self.cluster["user"]
+ vdisk = self.benchmark["vdisk"]
+ dest_dir = self.cluster["tmp_dir"]
+ if not to_attach_dict:
+ to_attach_dict = self.benchmark["distribution"]
+ for client in to_attach_dict:
+ nodes = to_attach_dict[client]
+ for node in nodes:
+ common.printout("LOG","Attach rbd image to %s" % node)
+ stdout, stderr = common.pdsh(user, [node], "fdisk -l %s" % vdisk, option="check_return")
+ res = common.format_pdsh_return(stdout)
+ if node not in res:
+ common.pdsh(user, [client], "cd %s/vdbs; virsh attach-device %s %s.xml" % (dest_dir, node, node), except_returncode=1)
+ common.pdsh(user, [node], "mount | grep /dev/vdb1; if [ $? ne 0]; then parted -s -a optimal /dev/vdb mklabel gpt -- mkpart primary ext4 1 100%; mkfs -t ext4 /dev/vdb1; mount /dev/vdb1 /mnt; fi")
+
+ def detach_images(self):
+ user = self.cluster["user"]
+ vdisk = self.benchmark["vdisk"]
+ tmp_vdisk = re.search('/dev/(\w+)',vdisk)
+ vdisk_suffix = tmp_vdisk.group(1)
+ #for client in self.cluster["testjob_distribution"]:
+ for client in self.benchmark["distribution"]:
+ nodes = self.benchmark["distribution"][client]
+ for node in nodes:
+ common.printout("LOG","Detach rbd image from %s" % node)
+ stdout, stderr = common.pdsh(user, [node], "df %s" % vdisk, option="check_return")
+ if not stderr:
+ common.pdsh(user, [client], "virsh detach-disk %s %s" % (node, vdisk_suffix), except_returncode=1)
+
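As a rough illustration of what generate_benchmark_cases writes, with hypothetical values depth=1, width=10, files=10000, threads=16, rdpct=65, block_size=4k, a random I/O pattern and runtime=300, the two config files would look like this:
    format_cfg = "\n".join([
        "fsd=fsd1,anchor=/mnt/,depth=1,width=10,files=10000,size=4k",
        "fwd=default",
        "fwd=fwd1,fsd=fsd1",
        "rd=rd0,fwd=fwd1,fwdrate=max,format=only",
    ])
    case_cfg = "\n".join([
        "fsd=fsd1,anchor=/mnt/,depth=1,width=10,files=10000,size=4k",
        "fwd=default,xfersize=4k,fileio=random,fileselect=random,threads=16",
        "fwd=fwd1,fsd=fsd1,rdpct=65",
        "rd=rd1,fwd=fwd1,fwdrate=max,format=no,elapsed=300,interval=1",
    ])
    print(format_cfg)   # written to ../conf/format.cfg
    print(case_cfg)     # written to ../conf/vdbench_test.cfg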
diff --git a/benchmarking/run_cases.py b/benchmarking/run_cases.py
index 9d809f7..3078449 100644
--- a/benchmarking/run_cases.py
+++ b/benchmarking/run_cases.py
@@ -75,6 +75,8 @@ def main(args):
benchmark = generic.Generic()
if testcase["engine"] == "hook":
benchmark = hook.Hook()
+ if testcase["engine"] == "vdbench":
+ benchmark = vdbench.VdBench()
if not benchmark:
common.printout("ERROR","Unknown benchmark engine")
try:
From c4355e31472df6f8be38fe3a3e2226ba8ffbebe0 Mon Sep 17 00:00:00 2001
From: ShaoShianXA
Date: Fri, 2 Sep 2016 17:13:20 +0800
Subject: [PATCH 03/54] Fix a report list bug
Signed-off-by: ShaoShianXA
---
visualizer/visualizer.py | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/visualizer/visualizer.py b/visualizer/visualizer.py
index 7a62ecf..0f7eece 100644
--- a/visualizer/visualizer.py
+++ b/visualizer/visualizer.py
@@ -177,13 +177,19 @@ def update_report_list_db(self,tr_id,new_description):
def check_DB_case_list(self,re_dir,dbpath):
if os.path.exists(dbpath):
output = os.popen("ls "+re_dir)
- local_list = output.readlines()
- local_case_list = []
- for i in local_list:
- if i != 'cetune_report.db':
- local_case_list.append(i)
+ li = output.readlines()
+ local_list = []
+ for i in li:
+ if os.path.exists(os.path.join('/mnt/data/data2/',i.strip('\n'),i.strip('\n')+'.html')):
+ local_list.append(i.strip('\n'))
+ #local_case_list = []
+ #for i in local_list:
+ # if i != 'cetune_report.db':
+ # local_case_list.append(i)
DB_list = database.get_runid_list(dbpath)
- if local_case_list == DB_list:
+ local_list.sort()
+ DB_list.sort()
+ if local_list == DB_list:
return True
else:
return False
@@ -206,7 +212,8 @@ def generate_history_view(self, remote_host="127.0.0.1", remote_dir="/mnt/data/"
tr_start = re.search('()', line, re.S).group(1)
data = re.findall('| (.*?) | ', line, re.S)
- runid = int(data[0])
+ #runid = int(data[0])
+ runid = re.findall('id=(.*?)>', tr_start, re.S)[0]
if len(data) < 17:
data.insert(2, "")
formated_report[runid] = tr_start
@@ -219,6 +226,8 @@ def generate_history_view(self, remote_host="127.0.0.1", remote_dir="/mnt/data/"
database.createTB(dbpath)
rows = self.dataparse(formated_report)
runid_list = []
+ while [] in rows:
+ rows.remove([])
for i in rows:
runid_list.append(i[0])
if not database.check_case_exist(i[0],dbpath):
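The heart of the fix is that the on-disk report list and the rows coming back from the database are not guaranteed to arrive in the same order, so a direct list comparison could report a mismatch for identical sets; a minimal illustration (run IDs are made up):
    local_list = ['16-50-vdbench-randread', '15-50-fio-randwrite']
    db_list    = ['15-50-fio-randwrite', '16-50-vdbench-randread']
    print(local_list == db_list)     # False: same members, different order
    local_list.sort()
    db_list.sort()
    print(local_list == db_list)     # True once both sides are sorted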
From 67e7245eef8311db61255a86c2dce3eb8ee96812 Mon Sep 17 00:00:00 2001
From: "lux.lu"
Date: Sun, 4 Sep 2016 21:21:40 +0800
Subject: [PATCH 04/54] Save vdbench parameters for the result display
Signed-off-by: Li Ning
---
benchmarking/mod/bblock/vdbench.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/benchmarking/mod/bblock/vdbench.py b/benchmarking/mod/bblock/vdbench.py
index a0ced5c..a6a95a5 100644
--- a/benchmarking/mod/bblock/vdbench.py
+++ b/benchmarking/mod/bblock/vdbench.py
@@ -157,6 +157,11 @@ def generate_benchmark_cases(self, testcase):
case_cfg.append("rd=rd1,fwd=fwd1,fwdrate=max,format=no,elapsed=%d,interval=1" % runtime)
with open("../conf/vdbench_test.cfg", "w+") as f:
f.write("\n".join(case_cfg)+"\n")
+
+ params_list = []
+ params_list.append("depth=%d,width=%d,files=%d,threads=%d,rdpct=%d" % (depth, width, files_num, threads_num, read_percentage))
+ with open("../conf/vdbench_params.txt", "w+") as f:
+ f.write("\n".join(params_list)+"\n")
return True
def parse_benchmark_cases(self, testcase):
@@ -194,6 +199,7 @@ def archive(self):
nodes = self.cluster["nodes_distribution"]
head = self.cluster["head"]
dest_dir = self.cluster["dest_dir"]
+ common.cp("%s/conf/vdbench_params.txt" % self.pwd, "%s/conf/" % dest_dir)
#collect client data
for node in nodes:
common.bash("mkdir -p %s/raw/%s" % (dest_dir, node))
From 7ad63017dfdc6ebe56cdd144a4f1cd1b1a7f4352 Mon Sep 17 00:00:00 2001
From: ShaoShianXA
Date: Mon, 5 Sep 2016 17:39:53 +0800
Subject: [PATCH 05/54] Update the vdbench parameter file name in the analyzer
Signed-off-by: ShaoShianXA
---
analyzer/analyzer.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/analyzer/analyzer.py b/analyzer/analyzer.py
index c9bc2ac..830f10a 100644
--- a/analyzer/analyzer.py
+++ b/analyzer/analyzer.py
@@ -519,7 +519,7 @@ def getParameters(self):
dest_dir = self.cluster["dest_conf_dir"]
ps = ""
try:
- with open("%s/parameters.cfg" % dest_dir.replace("conf","raw"), 'r') as f:
+ with open("%s/vdbench_params.txt" % dest_dir.replace("raw","conf"), 'r') as f:
ps = f.read()
except:
pass
From 401efa973fe1c24c6e1f373eb71f66e9d845c248 Mon Sep 17 00:00:00 2001
From: ShaoShianXA
Date: Tue, 6 Sep 2016 22:42:21 +0800
Subject: [PATCH 06/54] Delete the Description entry from the all.conf defaults
Signed-off-by: ShaoShianXA
---
conf/handler.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/conf/handler.py b/conf/handler.py
index dc0b941..6e74fc6 100644
--- a/conf/handler.py
+++ b/conf/handler.py
@@ -216,7 +216,7 @@ def list_required_config(self):
required_list["benchmark"]["collector"]="blktrace,strace,fatrace,lttng,perfcounter"
required_list["benchmark"]["perfcounter_data_type"]="osd,filestore"
required_list["benchmark"]["perfcounter_time_precision_level"]=6
- required_list["benchmark"]["Description"]="width=10,depth=1,files=10000,threads=16,rdpct=65"
+ #required_list["benchmark"]["Description"]="width=10,depth=1,files=10000,threads=16,rdpct=65"
required_list["workflow"] = OrderedDict()
required_list["workflow"]["workstages"] = ["deploy","benchmark"]
From 357955acf86fedc905b4e922e24d69be04e31f3c Mon Sep 17 00:00:00 2001
From: ShaoShianXA
Date: Wed, 7 Sep 2016 23:51:53 +0800
Subject: [PATCH 07/54] Fix bug when creating a case without parameters
Signed-off-by: ShaoShianXA
---
conf/config.py | 24 ++++++++++++++++++++++--
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/conf/config.py b/conf/config.py
index fd6490c..238155c 100644
--- a/conf/config.py
+++ b/conf/config.py
@@ -300,13 +300,33 @@ def parse_benchmark_cases(self, testcase):
p = testcase
testcase_dict = {
"benchmark_driver":p[0],"worker":p[1], "container_size":p[2], "iopattern":p[3],
- "op_size":p[4], "object_size/QD":p[5], "rampup":p[6], "runtime":p[7], "device":p[8], "parameter":p[9]
+ "op_size":p[4], "object_size/QD":p[5], "rampup":p[6], "runtime":p[7], "device":p[8]
}
+
if len(p) == 11:
+ testcase_dict["parameter"] = p[9]
testcase_dict["description"] = p[10]
else:
- testcase_dict["description"] = ""
+ if len(p) == 9:
+ testcase_dict["parameter"] = ""
+ testcase_dict["description"] = ""
+ else:
+ if self.check_parameter_style(p[9]):
+ testcase_dict["parameter"] = p[9]
+ testcase_dict["description"] = ""
+ else:
+ testcase_dict["parameter"] = ""
+ testcase_dict["description"] = p[9]
return testcase_dict
+
+ def check_parameter_style(self,paras):
+ if paras != "":
+ for i in paras.split(','):
+ if len(i.split('=')) != 2:
+ return False
+ return True
+ else:
+ return False
class ConfigHelper():
def _check_config( self, key, value, requirement=None):
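A quick standalone sketch of what the new check does; it treats a string as parameters only when every comma-separated token is a key=value pair, which is how parse_benchmark_cases above tells a parameter column from a free-form description:
    def check_parameter_style(paras):
        # every comma-separated token must look like key=value
        if paras == "":
            return False
        return all(len(token.split('=')) == 2 for token in paras.split(','))

    print(check_parameter_style("width=10,depth=1,files=10000"))   # True  -> parameter column
    print(check_parameter_style("nightly regression run"))         # False -> description column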
From 451c2434d7662c2ac366bb3fe3537fdfd5bfa5ac Mon Sep 17 00:00:00 2001
From: "lux.lu"
Date: Thu, 8 Sep 2016 09:13:02 +0800
Subject: [PATCH 08/54] Change the case_type extraction method
Signed-off-by: Li Ning
---
analyzer/analyzer.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/analyzer/analyzer.py b/analyzer/analyzer.py
index 830f10a..0ae628e 100644
--- a/analyzer/analyzer.py
+++ b/analyzer/analyzer.py
@@ -55,7 +55,7 @@ def __init__(self, dest_dir):
self.result["description"] = self.getDescription()
def process_data(self):
- case_type = self.cluster["dest_dir"].split('/')[3].split('-')[2]
+ case_type = re.findall('\d\-\S+', self.cluster["dest_dir"])[0].split('-')[2]
if case_type == "vdbench":
self.result["description"] = "Description:"+ str(self.getDescription()) +" Parameters:"+ str(self.getParameters())
user = self.cluster["user"]
From b4c17f91d4199e21920315a9aa6f1f15a9592400 Mon Sep 17 00:00:00 2001
From: ShaoShianXA
Date: Fri, 9 Sep 2016 21:49:17 +0800
Subject: [PATCH 09/54] Add user login functionality
Signed-off-by: ShaoShianXA
---
README.md | 12 +-
webui/account.conf | 3 +
webui/login.py | 35 ++++
webui/static/css/Style.css | 5 +-
webui/static/js/Script.js | 2 +-
webui/static/login/css/style.css | 220 +++++++++++++++++++++++++
webui/static/login/img/backimg.png | Bin 0 -> 254 bytes
webui/static/login/img/userpic1.png | Bin 0 -> 4592 bytes
webui/{static => templates}/index.html | 53 +++---
webui/templates/login.html | 28 ++++
webui/webui.py | 43 ++++-
11 files changed, 362 insertions(+), 39 deletions(-)
create mode 100644 webui/account.conf
create mode 100644 webui/login.py
create mode 100644 webui/static/login/css/style.css
create mode 100644 webui/static/login/img/backimg.png
create mode 100644 webui/static/login/img/userpic1.png
rename webui/{static => templates}/index.html (90%)
create mode 100644 webui/templates/login.html
diff --git a/README.md b/README.md
index b9359f6..a0fc143 100644
--- a/README.md
+++ b/README.md
@@ -58,7 +58,7 @@ http://0.0.0.0:8080/
```
- CeTune WebUI
-
+
* * *
####Configure
@@ -91,11 +91,11 @@ Assume ceph is installed on all nodes, this part demonstrates the workflow of
- Uncheck 'Benchmark' and only check 'Deploy', then click 'Execute'
-
+
- WebUI will jump to 'CeTune Status' and you will see the console logs below
-
+
* * *
@@ -112,11 +112,11 @@ Assume ceph is installed on all nodes, this part is demonstrate the workflow of
* * *
####Check Benchmark Results
-
+
-
+
-
+
* * *
diff --git a/webui/account.conf b/webui/account.conf
new file mode 100644
index 0000000..24304d0
--- /dev/null
+++ b/webui/account.conf
@@ -0,0 +1,3 @@
+[account]
+admin = 123456
+root = 123456
diff --git a/webui/login.py b/webui/login.py
new file mode 100644
index 0000000..03b9e4b
--- /dev/null
+++ b/webui/login.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+import ConfigParser
+import os
+import collections
+
+class UserClass(object):
+ @classmethod
+ def read_conf_to_dict(self):
+ self.cf = ConfigParser.ConfigParser()
+ self.dict = collections.OrderedDict()
+ if os.path.exists("account.conf"):
+ self.cf.read("account.conf")
+ self.data = self.cf.items("account")
+ self.data = sorted(self.data)
+ for i in range(len(self.data)):
+ self.dict[self.data[i][0]]=self.data[i][1]
+ else:
+ print "ERROR:account.conf not exists."
+ return self.dict
+
+ @classmethod
+ def get_all_account(self):
+ return UserClass.read_conf_to_dict()
+
+ @classmethod
+ def check_account(self,key=[' ',' ']):
+ result = 'false'
+ if key[0] in UserClass.read_conf_to_dict().keys():
+ if UserClass.read_conf_to_dict()[key[0]] == key[1]:
+ result = 'true'
+ return result
+ return result
+ else:
+ print "ERROR:user not exists."
+ return result
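A brief usage sketch of the new helper (Python 2, like the module itself; run from the webui directory so account.conf is found, using the sample credentials from account.conf above):
    from login import UserClass

    # check_account expects a [username, password] pair and returns the
    # strings 'true' or 'false' rather than Python booleans.
    print(UserClass.get_all_account())                    # OrderedDict([('admin', '123456'), ('root', '123456')])
    print(UserClass.check_account(['admin', '123456']))   # 'true'
    print(UserClass.check_account(['admin', 'wrong']))    # 'false'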
diff --git a/webui/static/css/Style.css b/webui/static/css/Style.css
index 9cde7b5..e2c5b1c 100644
--- a/webui/static/css/Style.css
+++ b/webui/static/css/Style.css
@@ -31,8 +31,9 @@ body{ margin:0px; padding:0px;}
/***************************************************************************************************************/
#div_top_id{ position:fixed; top:25px; z-index:100; width:100%; height:55px; background-color:#337ab7; border-bottom:1px solid #2e6da4; box-shadow:2px 2px 3px #aaa;}
-#div_top_title_id{ float:left; width:50%; height:50px;}
-#div_top_status_id{ float:right; width:45%; height:50px;}
+#div_top_title_id{ float:left; width:45%; height:50px;}
+#div_top_status_id{ float:left; width:45%; height:50px;}
+#div_user_status_id{ float:right; width:10%;font-size:14px; height:50px;color:#FFF;font-weight:bold;margin-top:5px;}
#div_top_id h1{ margin:0; font-family:Arial, Helvetica, sans-serif; font-style:italic;font-weight:500;
line-height:1.1; font-size: 20px; color: #FFF;
diff --git a/webui/static/js/Script.js b/webui/static/js/Script.js
index 97a26d3..ec72313 100644
--- a/webui/static/js/Script.js
+++ b/webui/static/js/Script.js
@@ -237,7 +237,7 @@ function Report_Timer(init){
appendHtml +="";
- appendHtml +="";
+ appendHtml +="";
appendHtml +="";
diff --git a/webui/static/login/css/style.css b/webui/static/login/css/style.css
new file mode 100644
index 0000000..7d4055f
--- /dev/null
+++ b/webui/static/login/css/style.css
@@ -0,0 +1,220 @@
+/*
+ *
+ * Template Name: Fullscreen Login
+ * Description: Login Template with Fullscreen Background Slideshow
+ * Author: Anli Zaimi
+ * Author URI: http://azmind.com
+ *
+ */
+
+
+body {
+ background: #f8f8f8;
+ background-image:url(../img/backimg.png);
+ text-align: center;
+ font-size:14px;
+ color: #fff;
+ font-family:Microsoft YaHei,Segoe UI,Tahoma,Arial,Verdana,sans-serif;
+}
+
+#loading {
+ margin:0 auto;
+ display:none;
+}
+/*#loading img{
+ border-radius: 50%;
+ width: 500px;
+ height: 500px;
+ overflow: hidden;
+ position:relative;
+}*/
+#loading img{
+ position:absolute;
+ content:'';
+ width:450px;
+ height:380px;
+ top:0;left:0;
+ border-radius:10%;
+ box-shadow:0 0 30px 10px rgba(255,255,255,.7) inset;
+}
+
+.page-container {
+ width:350px;
+ height:auto;
+ margin: 120px auto 0 auto;
+}
+
+h1 {
+ font-size: 30px;
+ font-weight: 700;
+ text-shadow: 0 1px 4px rgba(0,0,0,.2);
+}
+
+form {
+ position: relative;
+ width: 350px;
+ margin: 15px auto 0 auto;
+ text-align: center;
+}
+
+#username,#password {
+ width: 270px;
+ height: 42px;
+ margin-top: 25px;
+ padding: 0 15px;
+ background: #2d2d2d; /* browsers that don't support rgba */
+ background: rgba(45,45,45,.15);
+ -moz-border-radius: 6px;
+ -webkit-border-radius: 6px;
+ border-radius: 6px;
+ border: 1px solid #3d3d3d; /* browsers that don't support rgba */
+ border: 1px solid rgba(255,255,255,.15);
+ -moz-box-shadow: 0 2px 3px 0 rgba(0,0,0,.1) inset;
+ -webkit-box-shadow: 0 2px 3px 0 rgba(0,0,0,.1) inset;
+ box-shadow: 0 2px 3px 0 rgba(0,0,0,.1) inset;
+ font-family: 'PT Sans', Helvetica, Arial, sans-serif;
+ font-size: 14px;
+ color: #fff;
+ text-shadow: 0 1px 2px rgba(0,0,0,.1);
+ -o-transition: all .2s;
+ -moz-transition: all .2s;
+ -webkit-transition: all .2s;
+ -ms-transition: all .2s;
+}
+
+#username,#password :-moz-placeholder { color: #fff; }
+#username,#password :-ms-input-placeholder { color: #fff; }
+#username,#password ::-webkit-input-placeholder { color: #fff; }
+
+#username,#password :focus {
+ outline: none;
+ -moz-box-shadow:
+ 0 2px 3px 0 rgba(0,0,0,.1) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ -webkit-box-shadow:
+ 0 2px 3px 0 rgba(0,0,0,.1) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ box-shadow:
+ 0 2px 3px 0 rgba(0,0,0,.1) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+}
+
+button {
+ cursor: pointer;
+ width: 300px;
+ height: 44px;
+ margin-top: 25px;
+ padding: 0;
+ background: #ef4300;
+ -moz-border-radius: 6px;
+ -webkit-border-radius: 6px;
+ border-radius: 6px;
+ border: 1px solid #ff730e;
+ -moz-box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.25) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ -webkit-box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.25) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.25) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ font-family: 'PT Sans', Helvetica, Arial, sans-serif;
+ font-size: 14px;
+ font-weight: 700;
+ color: #fff;
+ text-shadow: 0 1px 2px rgba(0,0,0,.1);
+ -o-transition: all .2s;
+ -moz-transition: all .2s;
+ -webkit-transition: all .2s;
+ -ms-transition: all .2s;
+}
+
+button:hover {
+ -moz-box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.15) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ -webkit-box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.15) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.15) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+}
+
+button:active {
+ -moz-box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.15) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ -webkit-box-shadow:
+ 0 15px 30px 0 rgba(255,255,255,.15) inset,
+ 0 2px 7px 0 rgba(0,0,0,.2);
+ box-shadow:
+ 0 5px 8px 0 rgba(0,0,0,.1) inset,
+ 0 1px 4px 0 rgba(0,0,0,.1);
+
+ border: 0px solid #ef4300;
+}
+
+.error {
+ display: none;
+ position: absolute;
+ top: 27px;
+ right: -55px;
+ width: 40px;
+ height: 40px;
+ background: #2d2d2d; /* browsers that don't support rgba */
+ background: rgba(45,45,45,.25);
+ -moz-border-radius: 8px;
+ -webkit-border-radius: 8px;
+ border-radius: 8px;
+}
+
+.error span {
+ display: inline-block;
+ margin-left: 2px;
+ font-size: 40px;
+ font-weight: 700;
+ line-height: 40px;
+ text-shadow: 0 1px 2px rgba(0,0,0,.1);
+ -o-transform: rotate(45deg);
+ -moz-transform: rotate(45deg);
+ -webkit-transform: rotate(45deg);
+ -ms-transform: rotate(45deg);
+
+}
+
+.connect {
+ width: 305px;
+ margin: 35px auto 0 auto;
+ font-size: 18px;
+ font-weight: 700;
+ text-shadow: 0 1px 3px rgba(0,0,0,.2);
+}
+
+.connect a {
+ display: inline-block;
+ width: 32px;
+ height: 35px;
+ margin-top: 15px;
+ -o-transition: all .2s;
+ -moz-transition: all .2s;
+ -webkit-transition: all .2s;
+ -ms-transition: all .2s;
+}
+
+.connect a.facebook { background: url(../img/facebook.png) center center no-repeat; }
+.connect a.twitter { background: url(../img/twitter.png) center center no-repeat; }
+
+.connect a:hover { background-position: center bottom; }
+
+/*--- Additional CSS ---*/
+#username,#password :{font-family:Microsoft YaHei,Segoe UI,Tahoma,Arial,Verdana,sans-serif; text-decoration:none; width:80%;}
+form,button{font-family:Microsoft YaHei,Segoe UI,Tahoma,Arial,Verdana,sans-serif; text-decoration:none; width:80%;}
+button.submit_button{ font-size:24px; letter-spacing:15px;}
+#username.Captcha{ width:130px; float:left;}
+#password.Captcha{ width:130px; float:left;}
+
+
+
+
diff --git a/webui/static/login/img/backimg.png b/webui/static/login/img/backimg.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f4d0452aab7e9656aa84c9d1daf5b4b4b2e6977
GIT binary patch
literal 254
zcmeAS@N?(olHy`uVBq!ia0y~yU~d4jxj2}BM1MG8=GvZsqRi?!|9abY_v3q>NLeh`yzbDi4aC#iUs$#!onr)=!EnHS&y;u&
zgW-o;0~?55(89nAq8&IHr9gCp5R(arW>96x0MlL^7r^usfdybAG!z|}K>`5_wn}?G
VX7`r8yd(m|_jL7hS?83{1OPdTMri;5
literal 0
HcmV?d00001
diff --git a/webui/static/login/img/userpic1.png b/webui/static/login/img/userpic1.png
new file mode 100644
index 0000000000000000000000000000000000000000..3444daec49bc4affa2abee6670d75b4cf2079592
GIT binary patch
literal 4592
zcmVPx#1ZP1_K>z@;j|==^1poj532;bRa{vGi!vFvd!vV){sAK>D5rs)aK~z{rrC8^G
zTjzDgztUk5>*7uflGuAslHi^o0fGYrKoTSYkOWC_X^WyHOO~u*S#fNqY5eP#x=ou{
ziKW<*<2JQF$FL;2
zZn3tDu_idju*?+Sr|Xkh203?%b)}@nq|}gP9T~whu5gZt%_Ed8EqHRQYeHg4NiCzS
zJIi|~D0@n57?fK^c=tHx%E-(^5<{G^k8sYE+&n1O^-<+
zcnf@RP8lxYqQX7Rc=JMV4vxZIg%hsia1#aYX&c3zS!Y^d9hMps$TEq9B_YxEh~(zD
zSRX^=8E2}x^wR
z(EgIbS5OA$HIYSpLE)Z4RNyi1o1q+8%8}vRlbmZD$N;Dnwo%qGs<0-3O{u<5RHN%d
z{DBwRo&tPimQl_ZK>D;yKFW1J@ioCPp}30PztqjFPR
zgn+@uL)*1g?eRQ?H<&d&*T@hU78V}ZY9B;`!vw7vG*Z{(K!
z-bv0qjxdM~sKH@)0P|VIM_~uUWQqAu3BRE{1;)RiX$Bs3kYZ#`Pqa)b}B(18UZvR~b>pY_dB
zt|`JF{yZvNf~cE^Sl1W`SH!sy5W;SRZG3q+%lT(%PmvEU)HNSaHysdy2iVA}s^y5b
zYok4T?#FMu_vrDHN~K)cEthvImEGO)?smDny|umd?gyWZl`iW$H`J}ixyYK(K=7e%
zT4ntu#s@%@l%aVVS+)|A0E;uuF^MT5LXAqzBk+LtgH?*OXNL9^NaCB%E-*r;gdE)E~zxeveHq7tt!t{5qZFo}It!!7e-hTHJL+c9Lcu3iNRNHU
zPOF+vn7dBieCPK&<;RuE&YyXJx3G_>l%L%G>V7D`qG>&*?>ST3eu4|H@{!frmV+v6
z##4~F2(76?MW8`qLg}S=cUA;sDt*(OcN&DM@JuVisB9F~BFw7@?l!b-r}*e`Wy_{_
z@WMOq-QKM{A%Xu34|aEul-=_7y}Ngs6Z`dDoAsTi`KH6_=;8XdV?ZT?;`Gl8APG>5
zJ14QG;e^tM;>?J^nhfFHaS$p9hYu}*;MmZTrsYt5+X;2^33X&c(|XF#cg{b0`B(3M
zQ7M=IA`8_JzyoBWa`)c7Xna`{J*sZmtm`tEfyABip2wj49|K5OVXr*1vLHy(r&q@W_pa!--=AqypRRIrZJJPLw>2Pxh=
zB?M;CV-%hu%!A#4ARt-UbhNhfl(F}$uIH?_^So#1xf}0%SpH)SzHSqC#j1#HWk^
zK5MF`BlRSVXX-l6XuHms`mcCLpa0=c-re(nWPNuJkJVN602e>_?eA6Il%eMtZTGqQ
zuCvZ`YWF3XAZd~I6i>Ni0WFe1sg-#)&s~|v~03$+pi3u#Sp=C|;VQt5z
zrfpNxdJ^W%@ypi4Ra^YZsqeqBRYA&kcK?_N9LalfQYruRwO=Y68JIW5E}8l-0FNqQ
zA_{^!rjDXA30o^e3##xuh+m3cWgQhsZE3VG^fB~%bU(xjU#)99rfNQln$~rmF~y!W
z^k35VT`&)PZ}QNqAAWIvhg9Qt1&Jek!k^05_wQ%&2f0Ae9KT{6ysGOtuW373+rC+k
z%`_k3!Ydr<6f4TmqQX5RwU3iTcqc`SdlK3PT0`N@lYZZXa2-@O9#BP()OVh$#uMwj
zrT>y`=%t477cczi=Di1xs#zc`RP_K!+1)8uE=#y4c@J!MHeukX8vEa*Lk|OW3#U7
zoVow9GjY|FxN1&b$#4Ao?>_%(ulxur@9}bF_tC>A?eS?{`?@U-^OtnJ7mTrs`krS1
zMrHJfru7Kw3o%f5p{(YF;5;EfXGUfn5y|aov@Fud(MbUlXpN>dZTqphw&Q&BMs3$A
zbN>Z<{5f;qB}448YxKqZ#!r6x$(MhML}I^OdHC>Q^I*}`_e_24iXr}-J#op>cV5$e
zTG@8IuH$%J%TZ*3^3Ais5?BXGM+)TDG@7D_vS+{#xP<1x_@Ka6G%ct@RO6w#)(u1N
z8AI=RZP$5i&n0K_YVYhnTzuv2dk-F0J-{M3C=*~-wvmOGUv0@=bR}MJ41V9#e@Wf>
zw5H=}UDwmv))S}&Cb*xEtf(57p)1iRAwR(a1P4&!pyaBuD?$LO8<)B8GS{%CX*;S%
z7TV7UZD(}d=OUT!9lCV=+FKvqzWZ>86vCe%v04}SbML`}H{SW+?8|R;Oua}D(0$I(
zeYURkB-d~lx<(yEr(I*w_gzHLLR+&a2UO!;YeqQ`Jy;b*d=S0lNL|M7?fYMEJt4(Ns`1+Z;6+k^|L6*p^43oI>pKq~y!+AZb1%Qukb1thV}nIF
z8xPjD9M(h+GJbGw9x)*N>zk2SP>mxZv}OoMU3vM&
zuRgo~=*iX&S|YJs-hDz+KmhoM7kOK~LNWt)cFNn8hg(~}eE*Y)!#}iipVFc^HLufw
z5>yXZM+2$tf%!CL%Lu-lh=D#sxAsn<9N54tQQr;wnc%Xy^YGP9=!a|H^U>RncxzOX4|j?7=ryk28=7G^i46&
zG+||hJp(*K(JC9}5d##ACbD$m%D>(D<`FUX?W$V&v-RD@y*D^kBT>CrJ;7P|?l+H5
zT>2+{6il-wK%}^)p~5i@pgF;4jebys0Rdwk6uJ_ZJUOYe$ORW0hEKlz-e+6oEt2B@
z%L_3e$k?p_8QZVD`CqN6<7}WLcY%T+Qu4~cjLe>{F%DsR5y{Djg61@ZAq$HS%%gp!
z_CNo-&%Xq;$UgpR{V!ewr{tT}y!`t8j|Xxaa_0o&n^6TZKFr8%8HiP+fcKGFgk{
z7$r?Z7%oB2n5{5zg0V{{U-|6IyU^k&y}!Ea|GY?Lk}!~5V14}QC*yO+dCxfIo`f)A
z+~a$LJ!VJ@Eh6Y;g*^q2Xh%k7&wv<|fkM3S?2rES#qigAr|-YIm;`fwo}TWXBWhI{gXu6(#>wsBygp?@*l
zH;>g6UusJ1kMu8u`%1V7X&Ox|MPlS5e61u;vI!ZgouzRFmsv8g+`)ON}whoFIYeHt|7wh}s1nV55
zY$K#JtYd;_5(~eSLp^w>Rly>{D|hB7Hz;KuT95Hz#LI&}&`~kz(#{#iomYWQhz6Vn
z=&IZ|&AKN*dU7j-IkKFVng(HuG9_5Ykis%hqwNxjwOuv!-LxfH#YM74AE&HCyeEU=
zk(yJ$0qaF1@_?trmXW(Au{kWtJq2$#e-YCl<1fe@IW!wha2|uN+%+MwWl2qaI~)$n
zt(eYWeoSmg)aVBkwj}2qp-lbzYTKo{E>TT=J8kJ_tVyY1V4toJz~r1@j8V*!GV?I}
zKnwv-F(6{k!Zzd#v^5r)dk|{ZlnheGJ&ArV_zNU`7V;oQ>C4j&>;ae`!Ei|bA@Q_r
z7)h5J2lmyr$@E>UwO>SAVhVFitn04P^}u{J5`z-M05BoAvw$a(A~q(;Q$@5g_XH$1
z<;ZeA=$a`cNRF1~$*KG^boKcvgcsNb=|gQxj6<{|1>RNIhQ)@s)RbWC!;CE{(e)zg
z3R5r4!wC^(?A}+~D%N%)E3`Es(Z|I4KH4_KyT%mOVX=My2t}a^o(Y8wkst%F;GJY$
z6PURW8{Q49aMD#vaBV7wdbTs_Uf8apZ}y4?R`iTcaCb9bBb>taK3yE7E4b5u{ZraLz#h0`ZAol>UtJexjdQlRRM+`;s_5Sc
ajsFe$VPHf%38z#50000
diff --git a/webui/static/index.html b/webui/templates/index.html
similarity index 90%
rename from webui/static/index.html
rename to webui/templates/index.html
[The HTML bodies of this renamed file's hunks were lost when the patch text was extracted; only the hunk headers survive: @@ -57,6 +57,9 @@ "Benchmark is runing.....", @@ -74,19 +77,19 @@, and @@ -398,7 +401,7 @@.]
diff --git a/webui/templates/login.html b/webui/templates/login.html
new file mode 100644
index 0000000..e54c31b
--- /dev/null
+++ b/webui/templates/login.html
@@ -0,0 +1,28 @@
[The 28 added lines of login.html markup were lost when the patch text was extracted and are not recoverable.]