Skip to content
Permalink

Comparing changes

Choose two branches to see what’s changed or to start a new pull request. If you need to, you can also compare across forks or learn more about diff comparisons.

Open a pull request

Create a new pull request by comparing changes across two branches. If you need to, you can also compare across forks. Learn more about diff comparisons here.
base repository: tsuna/sysbench-tools
Failed to load repositories. Confirm that selected base ref is valid, then try again.
Loading
base: master
Choose a base ref
...
head repository: cbowman0/sysbench-tools
Failed to load repositories. Confirm that selected head ref is valid, then try again.
Loading
compare: master
Choose a head ref
Able to merge. These branches can be automatically merged.
  • 4 commits
  • 3 files changed
  • 1 contributor

Commits on Feb 24, 2013

  1. Work with python in CentOS5

    Christopher Bowman committed Feb 24, 2013
    Copy the full SHA
    4c791d5 View commit details
  2. First hack at using this setup for performing sysbench tests against …

    …MySQL
    Christopher Bowman committed Feb 24, 2013
    Copy the full SHA
    362f12c View commit details
  3. Fix required option output directory

    Christopher Bowman committed Feb 24, 2013
    Copy the full SHA
    4b05649 View commit details
  4. Parameterize sysbench binary and sysbench library location

    Add args for sysbench binary and sysbench library.  Verify those args are valid.
    Christopher Bowman committed Feb 24, 2013
    Copy the full SHA
    003713d View commit details
Showing with 415 additions and 3 deletions.
  1. +186 −0 runmysqltests.sh
  2. +9 −3 sysbench-log2json.py
  3. +220 −0 sysbench-mysql-log2json.py
186 changes: 186 additions & 0 deletions runmysqltests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,186 @@
#!/bin/bash
#
# Run Sysbench tests against a MySQL database
# Expect Sysbench version 0.5

#sysbench arguments
#http://www.percona.com/docs/wiki/benchmark:sysbench:oltp.lua
# Command-line template for every sysbench invocation.  The _UPPERCASE_
# tokens are placeholders that the main loop substitutes (via ${var/pat/repl})
# with the values gathered from the command-line options.
SYSBENCH_TESTS_DEFAULT_ARGS=" _SYSBENCH_ \
--test=_TESTDIR_/_TEST_ \
--oltp-table-size=_TABLESIZE_ \
--oltp-tables-count=_NUMTABLES_ \
--max-time=300 \
--max-requests=0 \
--mysql-user=_USER_ \
--mysql-password=_PASS_ \
--mysql-host=_DBHOST_ \
--mysql-db=_DBNAME_ \
--mysql-table-engine=InnoDB \
--mysql-engine-trx=yes \
--num-threads=_NUMTHREAD_ "

# Lua test scripts (relative to the sysbench DB test library, -l) that the
# main loop runs, each at every thread count.
SYSBENCH_TESTS="delete.lua insert.lua oltp.lua select.lua select_random_points.lua select_random_ranges.lua update_index.lua update_non_index.lua"


################################################################################
#
# No need to touch anything below here
#

# Print the command-line help to stdout.  The heredoc body below is emitted
# verbatim (with $0 and $SYSBENCH_TESTS expanded), so its text must not be
# reformatted.
usage()
{
cat << EOF
usage: $0 options
Perform the sysbench test against MySQL. Runs the following db tests:
$SYSBENCH_TESTS
OPTIONS:
   -h      Show this message
   -b      SysBench Binary (w/ full path)
   -l      SysBench Library (sysbench/tests/db)
   -s      Database Host
   -d      Database Name
   -u      User in Database
   -p      Password of Database User
   -i      Size of tables
   -n      Number of tables
   -t      Test Name
   -o      Output Directory
   -v      Verbose
EOF
}

# Option defaults.  Empty values are mandatory options that must be supplied
# on the command line (checked after getopts below).
DBHOST=                   # -s: MySQL host to benchmark (required)
DBNAME="sbtest"           # -d: database name
DBUSER="sbtest"           # -u: MySQL user
DBPASS="sbtest1"          # -p: MySQL password
TABLESIZE=2000000         # -i: rows per table
NUMTABLES=8               # -n: number of tables
TESTNAME=                 # -t: label used in output paths (required)
VERBOSE=0                 # -v: currently only recorded, not acted upon
SYSBENCH=                 # -b: path to the sysbench binary (required)
SYSBENCH_DB_TESTS=        # -l: directory with the Lua DB test scripts (required)
OUTDIR=                   # -o: directory for per-test log files (required)

# Parse command-line options.
# FIX: the optstring used to be wrapped in Unicode "smart quotes" (“...”)
# pasted from a document; since they are not shell quote characters, the
# quote marks themselves became part of the optstring and were silently
# accepted as (meaningless) options.  Plain ASCII quotes restore the intent.
while getopts "hs:d:u:p:i:n:t:o:b:l:v" OPTION
do
     case $OPTION in
         h)
             usage
             exit 1
             ;;
         b)  # Path to the sysbench binary.
             SYSBENCH=$OPTARG
             ;;
         l)  # Directory containing the Lua DB test scripts.
             SYSBENCH_DB_TESTS=$OPTARG
             ;;
         s)
             DBHOST=$OPTARG
             ;;
         d)
             DBNAME=$OPTARG
             ;;
         u)
             DBUSER=$OPTARG
             ;;
         p)
             DBPASS=$OPTARG
             ;;
         i)
             TABLESIZE=$OPTARG
             ;;
         n)
             NUMTABLES=$OPTARG
             ;;
         t)
             TESTNAME=$OPTARG
             ;;
         o)
             OUTDIR=$OPTARG
             ;;
         v)
             VERBOSE=1
             ;;
         ?)
             # FIX: unknown options used to `exit' with status 0, letting
             # callers mistake a usage error for success.
             usage
             exit 1
             ;;
     esac
done

# All of these options are mandatory; bail out with the usage text when any
# one of them is missing.
if [[ -z $DBHOST || -z $TESTNAME || -z $OUTDIR || -z $SYSBENCH || -z $SYSBENCH_DB_TESTS ]]; then
     usage
     exit 1
fi

# Verify inputs are valid: the sysbench binary must be executable...
if [[ ! -x $SYSBENCH ]]; then
     echo "Sysbench binary not found at $SYSBENCH"
     exit 1
fi

# ...and the Lua test library must be an existing directory.
if [[ ! -d $SYSBENCH_DB_TESTS ]]; then
     echo "SysBench DB Test directory not accessible: $SYSBENCH_DB_TESTS"
     exit 1
fi

# Every test script we are about to run must be present in that directory.
for SBTEST in $SYSBENCH_TESTS; do
     if [[ ! -f "$SYSBENCH_DB_TESTS/$SBTEST" ]]; then
          echo "Sysbench test $SBTEST not found in $SYSBENCH_DB_TESTS."
          echo "Please verify SysBench library location"
          exit 1
     fi
done


# No spaces in name: TEST_NAME is used as a directory/file name component.
TEST_NAME="$TESTNAME-TableSize-$TABLESIZE-Tables-$NUMTABLES"

# Thread counts to sweep and how many times to repeat each combination.
NUMTHREADS="1 4 8 16 32 64 128"
NUMITERATIONS="1 2 3 4"

#Set start date?
# date +%Y%m%d%H%M%S

for SBTEST in $SYSBENCH_TESTS; do
  for NUMTHREAD in $NUMTHREADS; do
    TEST_EXEC="$SYSBENCH_TESTS_DEFAULT_ARGS"
    # FIX: quote the paths so an output directory or test name containing
    # whitespace does not split into several mkdir/redirection operands.
    mkdir -p "$OUTDIR/$TEST_NAME"
    # Send all further shell output (stdout AND stderr) to the per-test log.
    # FIX: was `2<&1' — functionally the same dup(2) but a typo for the
    # conventional `2>&1', which states the intent.
    exec >"$OUTDIR/$TEST_NAME/$SBTEST-$NUMTHREAD" 2>&1
    echo "`date` TESTING $TEST_NAME-$SBTEST-$NUMTHREAD"

    # Perform _VARIABLE_ substitutions on the command template.
    TEST_EXEC=${TEST_EXEC/_SYSBENCH_/$SYSBENCH}
    TEST_EXEC=${TEST_EXEC/_TESTDIR_/$SYSBENCH_DB_TESTS}
    TEST_EXEC=${TEST_EXEC/_USER_/$DBUSER}
    TEST_EXEC=${TEST_EXEC/_PASS_/$DBPASS}
    TEST_EXEC=${TEST_EXEC/_DBHOST_/$DBHOST}
    TEST_EXEC=${TEST_EXEC/_DBNAME_/$DBNAME}
    TEST_EXEC=${TEST_EXEC/_TABLESIZE_/$TABLESIZE}
    TEST_EXEC=${TEST_EXEC/_NUMTABLES_/$NUMTABLES}

    # The prepare command reuses the same arguments but a different script.
    PREPARE_EXEC="$TEST_EXEC"

    for i in $NUMITERATIONS; do
      echo "`date` start iteration $i"
      P=${PREPARE_EXEC/_TEST_/parallel_prepare.lua}
      P=${P/_NUMTHREAD_/$NUMTHREAD}
      echo $P run
      # $P and $T are whole command lines; word splitting is intentional
      # here, so they are deliberately left unquoted.
      $P run
      T=${TEST_EXEC/_TEST_/$SBTEST}
      T=${T/_NUMTHREAD_/$NUMTHREAD}
      echo $T run
      $T run
      $P cleanup
    done
    echo "`date` DONE TESTING $TEST_NAME-$SBTEST-$NUMTHREAD"
    sleep 30
  done
#  date | mail -s "$mode benchmarks done" your@email.here
done
12 changes: 9 additions & 3 deletions sysbench-log2json.py
Original file line number Diff line number Diff line change
@@ -15,7 +15,7 @@

"""Parse sysbench's output and transform it into JSON."""

import json
import simplejson as json
import os
import re
import sys
@@ -182,15 +182,19 @@ def main(args):
return 2
if config not in config2results:
config2results[config] = {}
with open(arg) as f:
f = open(arg)
try:
process(f, config2results[config])
finally:
f.close()

for config, results in config2results.iteritems():
for test_mode, data in results.iteritems():
data["averages"] = dict((metric, [[num_threads, sum(vs) / len(vs)]
for num_threads, vs in sorted(values.iteritems())])
for metric, values in data["results"].iteritems())
with open("results.js", "w") as f:
f = open("results.js", "w")
try:
f.write("TESTS = ");
json.dump(TESTS, f, indent=2)
f.write(";\nMETRICS = {\n");
@@ -199,6 +203,8 @@ def main(args):
f.write("\n};\nresults = ");
json.dump(config2results, f, indent=2)
f.write(";")
finally:
f.close()


if __name__ == "__main__":
220 changes: 220 additions & 0 deletions sysbench-mysql-log2json.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,220 @@
#!/usr/bin/python
# Copyright (C) 2011 Benoit Sigoure
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Parse sysbench's output and transform it into JSON."""

import simplejson as json
import os
import re
import sys


# Multipliers used by tobytes() to turn a binary size suffix into bytes.
TOBYTES = {
  "K": 1024,
  "M": 1024 * 1024,
  "G": 1024 * 1024 * 1024,
  "T": 1024 * 1024 * 1024 * 1024,
}

# Maps a test-mode name to itself.  Populated lazily by process() as new
# test modes appear in the logs, then dumped verbatim into results.js.
TESTS = {
}

# (metric key, human-readable description) pairs, in display order.
METRICS = (
  # ("num_threads", "Number of threads"),
  # ("test_mode", "Test mode"),
  ("nread", "Number of read queries"),
  ("nwrite", "Number of write queries"),
  ("nother", "Number of other queries"),
  ("ntotal", "Total number of queries"),
  ("transactions", "Transactions"),
  ("deadlocks", "Deadlocks"),
  ("rwrequests", "Total RW Requests"),
  # NOTE(review): the leading space in this description looks unintentional
  # (it is emitted as-is into results.js) — confirm before changing.
  ("otherrequests", " Total Other Requests"),
  ("total_time", "Total time"),
  ("total_num_events", "Total number of events"),
  #("total_exec_time", "Total execution time"),
  ("req_min", "Min. latency"),
  ("req_avg", "Avg. latency"),
  ("req_max", "Max. latency"),
  ("req_95p", "95th percentile latency"),
  # Derived metrics
  ("nreadps", "Reads/s"),
  ("nwriteps", "Writes/s"),
)

# Metric keys in declaration order; METRICS itself then becomes the
# key -> description lookup dict used when writing results.js.
SORTED_METRICS = tuple(metric for metric, description in METRICS)
METRICS = dict(METRICS)

def tobytes(s):
  """Helper to convert, say, "1.42Mb" into `1488977.92'.

  Args:
    s: A size string ending in "b", optionally with a single upper-case
      binary-unit prefix before it ("K", "M", "G" or "T").
  Returns:
    The size in bytes: a float when a unit multiplier or fractional value
    is involved, an int for plain integral values such as "123b".
  """
  if "A" <= s[-2] <= "Z":
    return float(s[:-2]) * TOBYTES[s[-2]]
  # No unit multiplier: strip the trailing "b" and parse the number.
  # FIX: fall back to float for fractional values such as "1.5b", which
  # used to crash with ValueError in int(); integral values still return
  # an int so existing output is unchanged.
  try:
    return int(s[:-1])
  except ValueError:
    return float(s[:-1])


def toms(s):
  """Helper to convert, say, "1.42s" into `1420'."""
  # A trailing "ms" means the value is already in milliseconds; a bare
  # trailing "s" marks seconds, which are scaled up by 1000.
  suffix_len = 2 if s.endswith("ms") else 1
  value = float(s[:-suffix_len])
  return value if suffix_len == 2 else value * 1000


def process(f, results):
  """Populate the results dict with data parsed from the file.

  Args:
    f: An open file (or iterable of lines) holding the log produced by
      runmysqltests.sh: each echoed "sysbench ... run" command line
      followed by that run's sysbench output.
    results: Dict keyed by test mode, filled in place.  Each entry is
      {"results": {metric: {num_threads: [sample, ...]}}} so repeated
      iterations accumulate samples in the list.
  """
  # Parse state: both stay None until the first echoed "... run" command
  # line is seen; that line anchors all following stats lines.
  test_mode = num_threads = None
  # Flags marking which indented stats section we are currently inside.
  resp_stats = query_stats = None

  data = None  # Alias for results[test_mode]["results"] once known.
  def record(metric, value):
    # Append one sample for `metric' under the current thread count.
    data[metric].setdefault(num_threads, []).append(value)

  line = None
  def match(regexp):
    # Strict anchored match on the current stripped line; a non-match
    # means an unexpected log format.  NOTE(review): never called in this
    # file — presumably kept for parity with sysbench-log2json.py.
    m = re.match(regexp, line.strip())
    assert m, "%r did not match %r" % (line.strip(), regexp)
    return m

  for line in f:
    # Base case to start new section: the echoed sysbench command line
    # ends in "run" and carries the --foo=bar arguments we need.
    if line.endswith("run\n"):
      sysbench_args = dict(arg.lstrip("-").split("=", 1)
                           for arg in line.split()
                           if "=" in arg)
      num_threads = int(sysbench_args["num-threads"])
      # The test is identified by its Lua script's basename, e.g. "oltp.lua".
      test_mode = os.path.basename(sysbench_args["test"])
      resp_stats = False

      if test_mode not in TESTS:
        TESTS[test_mode] = test_mode

      if test_mode not in results:
        # First sighting of this mode: one {num_threads: [samples]} dict
        # per known metric.
        data = dict((metric, {}) for metric in METRICS)
        results[test_mode] = {
          "results": data,
        }
      else:
        data = results[test_mode]["results"]
    # Read until we get to the base case the first time
    elif test_mode is None:
      continue

    # Section headers.  NOTE(review): these exact-line and prefix
    # comparisons assume sysbench 0.5's exact indentation — confirm
    # against a real log before touching the whitespace.
    elif line == " queries performed:\n":
      query_stats = True
    elif line == " response time:\n":
      resp_stats = True

    elif query_stats and line.startswith(" transactions:"):
      transactions = int(line.split()[1])
      record("transactions", transactions)
    elif query_stats and line.startswith(" deadlocks:"):
      deadlocks = int(line.split()[1])
      record("deadlocks", deadlocks)
    elif query_stats and line.startswith(" read/write requests:"):
      requests = int(line.split()[2])
      record("rwrequests", requests)
    elif query_stats and line.startswith(" other operations:"):
      other = int(line.split()[2])
      record("otherrequests", other)
    elif line.startswith(" total time:"):
      total_time = toms(line.split()[-1])
      #record("total_time", total_time)
      # toms() returned milliseconds; convert to seconds for the derived
      # per-second rates below.  NOTE(review): nread/nwrite are bound in
      # the "queries performed" branch further down — a log where "total
      # time" comes first would raise NameError; assumes sysbench's order.
      total_time /= 1000
      record("nreadps", nread / total_time)
      record("nwriteps", nwrite / total_time)
    elif line.startswith(" total number of events:"):
      record("total_num_events", int(line.split()[-1]))
    #elif line.startswith(" total time taken by event execution:"):
    #  record("total_exec_time", float(line.split()[-1]))
    elif line == "\n":
      # A blank line terminates whichever stats section was open.
      query_stats = False
      resp_stats = False

    # Individual "name: value" counters of the "queries performed" section.
    elif query_stats and ": " in line:
      stat, value = line.split(":")
      stat = stat.strip()
      value = int(value.strip())
      if stat == "read":
        nread = value
        record("nread", value)
      elif stat == "write":
        nwrite = value
        record("nwrite", value)
      elif stat == "other":
        nother = value
        record("nother", value)
      elif stat == "total":
        ntotal = value
        record("ntotal", value)
      else:
        assert False, repr(stat)

    # Individual "name: value" latencies of the "response time" section.
    elif resp_stats and ": " in line:
      stat, value = line.split(":")
      stat = stat.strip()
      value = toms(value.strip())
      if stat == "min":
        record("req_min", value)
      elif stat == "avg":
        record("req_avg", value)
      elif stat == "max":
        record("req_max", value)
      elif stat == "approx. 95 percentile":
        record("req_95p", value)
      else:
        assert False, repr(stat)


def main(args):
  """Parse every log file given in argument and write results.js.

  Each file must live in a directory whose name identifies the benchmark
  configuration; results are grouped per configuration, per test mode.
  Returns a process exit status (0 on success, non-zero on usage errors).
  """
  args.pop(0)  # Drop the program name.
  if not args:
    sys.stderr.write("Need at least one file in argument\n")
    return 1
  # maps a config name to results for this config
  config2results = {}
  for arg in args:
    config = os.path.basename(os.path.dirname(arg))
    if not config:
      sys.stderr.write("Error: %r needs to be in a directory named after"
                       " the config name\n" % (arg,))
      return 2
    config2results.setdefault(config, {})
    # Plain open/close instead of `with': keeps old Pythons happy.
    f = open(arg)
    try:
      process(f, config2results[config])
    finally:
      f.close()

  # Collapse each metric's per-iteration samples into their average,
  # sorted by thread count.
  for config, results in config2results.iteritems():
    for test_mode, data in results.iteritems():
      averages = {}
      for metric, values in data["results"].iteritems():
        averages[metric] = [[num_threads, sum(vs) / len(vs)]
                            for num_threads, vs in sorted(values.iteritems())]
      data["averages"] = averages

  # Emit results.js: three JS assignments consumed by the report page.
  out = open("results.js", "w")
  try:
    out.write("TESTS = ")
    json.dump(TESTS, out, indent=2)
    out.write(";\nMETRICS = {\n")
    metric_lines = [' "%s": "%s",' % (metric, METRICS[metric])
                    for metric in SORTED_METRICS]
    out.write("\n".join(metric_lines))
    out.write("\n};\nresults = ")
    json.dump(config2results, out, indent=2)
    out.write(";")
  finally:
    out.close()


if __name__ == "__main__":
  # Propagate main()'s return value as the process exit status.
  sys.exit(main(sys.argv))