%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import math
import sh
sns.set(font_scale=1.5)
IOSTAT_COLUMNS = ['r/s', 'w/s', 'kr/s', 'kw/s', 'wait', 'actv', 'wsvc_t', 'asvc_t', '%w', '%b', 'device']
TEST = 'fixed-rate-submit'
TIMEOUT = 5
NUMJOBS = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
NDISKS = [1, 2, 4, 8]
TIMEOUTS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
DISKS = ['c1t1d0', 'c1t2d0', 'c1t3d0', 'c2t0d0', 'c2t1d0', 'c2t2d0',
'c4t0d0', 'c4t1d0', 'c4t2d0', 'c3t0d0', 'c3t1d0']
def get_fio_iops_series(results):
    jq = sh.jq.bake('-M', '-r')
    iops = []
    for numjobs in NUMJOBS:
        data = jq('.jobs[0].write.iops', '{:s}/fio-{:d}-jobs/fio.json'.format(results, numjobs))
        iops.append(float(data.strip()))
    return pd.Series(iops, NUMJOBS)

def get_fio_latency_series(results):
    jq = sh.jq.bake('-M', '-r')
    latency = []
    for numjobs in NUMJOBS:
        data = jq('.jobs[0].write.lat.mean', '{:s}/fio-{:d}-jobs/fio.json'.format(results, numjobs))
        latency.append(float(data.strip()))
    return pd.Series(latency, NUMJOBS)

def get_iostat_series(results, column, ndisks):
    jobavgs = []
    for numjobs in NUMJOBS:
        diskavgs = pd.Series()
        for disk in DISKS[0:ndisks]:
            data = pd.read_csv('{:s}/fio-{:d}-jobs/iostat-{:s}.txt'.format(results, numjobs, disk),
                               delim_whitespace=True, header=None, names=IOSTAT_COLUMNS, skiprows=5)
            diskavgs[disk] = data[column].mean()
        # average the per-disk means across all disks in the pool
        jobavgs.append(diskavgs.mean())
    return pd.Series(jobavgs, NUMJOBS)

def get_pctchange_dataframe(project, master):
    diff = pd.DataFrame()
    for plabel, mlabel in zip(project, master):
        new = project[plabel]
        old = master[mlabel]
        diff[plabel] = 100 * ((new - old) / old)
    return diff
def plot_fio_iops_dataframe(df):
    df.plot(figsize=(30, 15), style=':o')
    plt.title('fio -- write iops vs. fio threads')
    plt.xlabel('number of fio threads issuing writes')
    plt.ylabel('write iops reported by fio')
    plt.loglog(basey=2)
    plt.xticks(df.index, df.index)
    plt.show()

def plot_fio_iops_pctchange_dataframe(df):
    df.plot(figsize=(30, 15), style=':o')
    plt.title('fio -- % change in write iops vs. fio threads (project vs. baseline)')
    plt.xlabel('number of fio threads issuing writes')
    plt.ylabel('% change in write iops reported by fio')
    plt.ylim(-50, 150)
    plt.xscale('log')
    plt.xticks(df.index, df.index)
    plt.show()

def plot_fio_latency_dataframe(df):
    df.plot(figsize=(30, 15), style=':o')
    plt.title('fio -- average write latency vs. fio threads')
    plt.xlabel('number of fio threads issuing writes')
    plt.ylabel('average write latency reported by fio (microseconds)')
    plt.loglog(basey=2)
    plt.xticks(df.index, df.index)
    plt.show()

def plot_fio_latency_pctchange_dataframe(df):
    df.plot(figsize=(30, 15), style=':o')
    plt.title('fio -- % change in average write latency vs. fio threads (project vs. baseline)')
    plt.xlabel('number of fio threads issuing writes')
    plt.ylabel('% change in average write latency reported by fio')
    plt.ylim(-150, 50)
    plt.xscale('log')
    plt.xticks(df.index, df.index)
    plt.show()

def plot_iostat_dataframe(df, column):
    df.plot(figsize=(30, 15), style=':o')
    plt.title('iostat -- {:s} vs. fio threads'.format(column))
    plt.xlabel('number of fio threads issuing writes')
    plt.xscale('log')
    plt.xticks(df.index, df.index)
    plt.show()
def plot_master_lwb_points(plottype, results, title):
    ncols = 3
    nrows = math.ceil(len(NUMJOBS) / float(ncols))
    plt.figure(figsize=(30, 7.5 * nrows))
    for i, numjobs in enumerate(NUMJOBS):
        df = pd.read_csv('{:s}/fio-{:d}-jobs/dtrace-zil-lwb-points.txt'.format(results, numjobs),
                         delim_whitespace=True)
        plt.subplot(nrows, ncols, i + 1)
        if plottype == 'scatter':
            plt.scatter(df.index, df['used'], s=10, label='used')
            plt.scatter(df.index, df['size'], s=5, label='size')
            plt.title('{:s} - {:d} fio threads'.format(title, numjobs))
            plt.xlabel('time')
            plt.ylabel('bytes')
            plt.yscale('log', basey=2)
            plt.ylim(2**9, 2**18)
        elif plottype == 'histogram':
            bins = 2**(np.arange(9, 18))
            plt.hist(df['used'], label='used', bins=bins, alpha=1)
            plt.hist(df['size'], label='size', bins=bins, alpha=0.75)
            plt.title('{:s} - {:d} fio threads'.format(title, numjobs))
            plt.ylabel('count')
            plt.xlabel('bytes')
            plt.xscale('log', basex=2)
            plt.xlim(2**9, 2**18)
            plt.xticks(bins)
        else:
            raise ValueError('invalid plot type specified: {:s}'.format(plottype))
        plt.legend()
    plt.show()
master_lat_hdd = pd.DataFrame()
master_iops_hdd = pd.DataFrame()
master_busy_hdd = pd.DataFrame()
master_actv_hdd = pd.DataFrame()
master_asvc_hdd = pd.DataFrame()
for i in NDISKS:
    label = 'baseline - {:d} {:s}'.format(i, 'hdd')
    results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, i, 'hdd')
    master_lat_hdd[label] = get_fio_latency_series(results)
    master_iops_hdd[label] = get_fio_iops_series(results)
    master_busy_hdd[label] = get_iostat_series(results, '%b', i)
    master_actv_hdd[label] = get_iostat_series(results, 'actv', i)
    master_asvc_hdd[label] = get_iostat_series(results, 'asvc_t', i)
project_lat_hdd = pd.DataFrame()
project_iops_hdd = pd.DataFrame()
project_busy_hdd = pd.DataFrame()
project_actv_hdd = pd.DataFrame()
project_asvc_hdd = pd.DataFrame()
for i in NDISKS:
    label = 'project - {:d} {:s} - {:d}% lwb timeout'.format(i, 'hdd', TIMEOUT)
    results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, i, 'hdd', TIMEOUT)
    project_lat_hdd[label] = get_fio_latency_series(results)
    project_iops_hdd[label] = get_fio_iops_series(results)
    project_busy_hdd[label] = get_iostat_series(results, '%b', i)
    project_actv_hdd[label] = get_iostat_series(results, 'actv', i)
    project_asvc_hdd[label] = get_iostat_series(results, 'asvc_t', i)
pctchange_lat_hdd = get_pctchange_dataframe(project_lat_hdd, master_lat_hdd)
pctchange_iops_hdd = get_pctchange_dataframe(project_iops_hdd, master_iops_hdd)
master_lat_ssd = pd.DataFrame()
master_iops_ssd = pd.DataFrame()
master_busy_ssd = pd.DataFrame()
master_actv_ssd = pd.DataFrame()
master_asvc_ssd = pd.DataFrame()
for i in NDISKS:
    label = 'baseline - {:d} {:s}'.format(i, 'ssd')
    results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, i, 'ssd')
    master_lat_ssd[label] = get_fio_latency_series(results)
    master_iops_ssd[label] = get_fio_iops_series(results)
    master_busy_ssd[label] = get_iostat_series(results, '%b', i)
    master_actv_ssd[label] = get_iostat_series(results, 'actv', i)
    master_asvc_ssd[label] = get_iostat_series(results, 'asvc_t', i)
project_lat_ssd = pd.DataFrame()
project_iops_ssd = pd.DataFrame()
project_busy_ssd = pd.DataFrame()
project_actv_ssd = pd.DataFrame()
project_asvc_ssd = pd.DataFrame()
for i in NDISKS:
    label = 'project - {:d} {:s} - {:d}% lwb timeout'.format(i, 'ssd', TIMEOUT)
    results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, i, 'ssd', TIMEOUT)
    project_lat_ssd[label] = get_fio_latency_series(results)
    project_iops_ssd[label] = get_fio_iops_series(results)
    project_busy_ssd[label] = get_iostat_series(results, '%b', i)
    project_actv_ssd[label] = get_iostat_series(results, 'actv', i)
    project_asvc_ssd[label] = get_iostat_series(results, 'asvc_t', i)
pctchange_lat_ssd = get_pctchange_dataframe(project_lat_ssd, master_lat_ssd)
pctchange_iops_ssd = get_pctchange_dataframe(project_iops_ssd, master_iops_ssd)
This workload consisted of using fio to drive synchronous writes, while varying the number of threads used by fio. Each fio thread would issue writes to a unique file, using sequential file offsets, pwrite, O_SYNC, a blocksize of 8k, and a queue depth of 1 (i.e. each thread performing a single write at a time). Additionally, each thread would attempt to achieve a rate of about 64 writes per second; i.e. after a write completes, the thread may artificially delay itself, such that it doesn't exceed its target of 64 write operations per second. Here's the fio configuration used to achieve this:
[global]
group_reporting
clocksource=cpu
ioengine=psync
fallocate=none
blocksize=8k
runtime=60
time_based
iodepth=1
rw=write
thread=0
direct=0
sync=1
# Real world random request flow follows Poisson process. To give better
# insight on latency distribution, we simulate request flow under Poisson
# process.
rate_process=poisson
rate_iops=64
[zfs-workload]
The command line flag --numjobs was used to vary the number of threads used for each invocation, ranging from a single thread to 1024 threads.
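For illustration, here's a sketch of how a single sweep over the thread counts could be launched from Python using the sh module imported above; the job-file name (zfs-workload.fio), the output locations, and the use of fio's JSON output are assumptions made for the example, not a description of the actual test harness:

```python
# Hypothetical sketch: run fio once per thread count and store the JSON
# output where the parsing helpers above expect to find it. The job-file
# name and directory layout are assumptions for illustration only.
import sh

def run_fio_sweep(results_dir, jobfile='zfs-workload.fio'):
    for numjobs in NUMJOBS:
        outdir = '{:s}/fio-{:d}-jobs'.format(results_dir, numjobs)
        sh.mkdir('-p', outdir)
        sh.fio('--numjobs={:d}'.format(numjobs),
               '--output-format=json',
               '--output={:s}/fio.json'.format(outdir),
               jobfile)
```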
The above fio workload was run on zpools with varying numbers of direct attached disks; configurations of 1 disk, 2 disks, 4 disks, and 8 disks were used. Additionally, both traditional magnetic drives and solid state drives were tested. All configuration options were kept default at the zpool level (i.e. no -o options were passed to zpool create).
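As a sketch of the pool-creation step (the pool name and the flat, non-redundant vdev layout are assumptions; only the use of all-default options with zpool create comes from the description above):

```python
# Hypothetical sketch: create a zpool with all-default options from the
# first N devices in DISKS. The pool name and vdev layout are assumptions.
import sh

def create_test_pool(ndisks, poolname='testpool'):
    # no -o options are passed, matching the default zpool-level
    # configuration described above
    sh.zpool('create', poolname, *DISKS[0:ndisks])
```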
For all tests, a single ZFS dataset was used to store all the fio files for all thread counts. The configuration options used for this dataset were the following: recsize=8k, compress=lz4, checksum=edonr, redundant_metadata=most. These were all chosen to match the options used by our Delphix Engine, except recsize, which was used to avoid the read-modify-write penalty since fio was issuing 8k writes.
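For illustration, a sketch of creating a dataset with those properties (the pool and dataset names are assumptions, not taken from the actual test setup):

```python
# Hypothetical sketch: create the ZFS dataset with the properties listed
# above. The pool/dataset names are assumptions for illustration.
import sh

def create_test_dataset(poolname='testpool', dataset='fio'):
    sh.zfs('create',
           '-o', 'recsize=8k',
           '-o', 'compress=lz4',
           '-o', 'checksum=edonr',
           '-o', 'redundant_metadata=most',
           '{:s}/{:s}'.format(poolname, dataset))
```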
Two different system configurations were used for these tests. When running the tests on a zpool with magnetic drives, the "HDD" system was used; and when running the tests on a zpool with solid state drives, the "SSD" system was used.
What follows is an overview of the performance information gathered from running the fio workload described above, on zpools comprised of magnetic drives, using the "HDD" system configuration. There will be results for the "baseline" branch of the os-gate (specifically, commit d99a4e8b5c of the "master" branch), as well as my custom "project" branch.
Write IOPs reported by fio vs. number of fio threads
Below is a graph of the write IOPs reported by fio (using the write.iops metric), which accounts for all fio threads in the given run; i.e. it's the aggregate value across all fio threads rather than the value of each individual fio thread. Additionally, each line corresponds to a different zpool configuration, each comprising a different number of disks in the pool.
Write IOPs reported by fio vs. number of fio threads - baseline branch
plot_fio_iops_dataframe(master_iops_hdd)
master_iops_hdd
fio threads | baseline - 1 hdd | baseline - 2 hdd | baseline - 4 hdd | baseline - 8 hdd |
---|---|---|---|---|
1 | 63.086019 | 63.102834 | 63.104936 | 63.082127 |
2 | 127.009245 | 126.975404 | 127.017708 | 126.991003 |
4 | 254.418550 | 254.261553 | 254.274251 | 254.261553 |
8 | 511.529490 | 511.119989 | 511.153654 | 511.153283 |
16 | 1012.911932 | 1023.721139 | 1023.906240 | 1023.873739 |
32 | 1579.966344 | 2040.357250 | 2043.733768 | 2044.513992 |
64 | 2763.592435 | 2949.631777 | 3966.092375 | 4096.571134 |
128 | 4336.204455 | 4061.521290 | 5269.706157 | 6852.563355 |
256 | 5514.430363 | 7257.887723 | 6633.361079 | 8700.940961 |
512 | 7502.346636 | 9897.906500 | 10417.680846 | 10696.507894 |
1024 | 7443.628632 | 12098.213841 | 14688.071716 | 13020.219488 |
Write IOPs reported by fio vs. number of fio threads - project branch
plot_fio_iops_dataframe(project_iops_hdd)
project_iops_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 63.101782 | 63.071313 | 63.095476 | 63.068617 |
2 | 127.026172 | 127.019823 | 126.943695 | 126.973816 |
4 | 254.265785 | 254.261553 | 254.210772 | 254.253088 |
8 | 511.137378 | 511.161792 | 511.161420 | 511.203969 |
16 | 1023.837267 | 1023.939137 | 1023.870957 | 1024.023974 |
32 | 2044.046410 | 2044.365646 | 2045.149471 | 2044.983102 |
64 | 4096.971413 | 4097.333933 | 4097.285595 | 4099.205622 |
128 | 6915.025820 | 8045.968777 | 8124.390041 | 8179.748522 |
256 | 7865.908106 | 12252.785203 | 14895.994003 | 14823.153267 |
512 | 7541.328245 | 13695.750557 | 19872.590386 | 23560.001332 |
1024 | 7789.282803 | 13758.406682 | 23917.667809 | 31686.370749 |
Write IOPs reported by fio vs. number of fio threads - project vs. baseline
The following graph shows the percentage change in the IOPs reported by fio between the "project" and "baseline" test runs. A positive value reflects an increase in the IOPs reported by fio when comparing the "project" results to the "baseline"; i.e. positive is better. Additionally, a 100% increase reflects a doubling of the IOPs; e.g. going from 100 IOPs in the "baseline" run to 200 IOPs in the "project" run. Similarly, a 50% decrease equates to halving the IOPs.
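To make the arithmetic concrete, here's a minimal worked example of the same calculation performed by get_pctchange_dataframe() above, using made-up numbers:

```python
# Worked example of the percent-change calculation, with made-up numbers.
import pandas as pd

old = pd.Series([100.0, 200.0])   # baseline IOPs
new = pd.Series([200.0, 100.0])   # project IOPs
pct = 100 * ((new - old) / old)
# pct[0] == 100.0  -> IOPs doubled
# pct[1] == -50.0  -> IOPs halved
```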
plot_fio_iops_pctchange_dataframe(pctchange_iops_hdd)
pctchange_iops_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 0.024987 | -0.049952 | -0.014991 | -0.021417 |
2 | 0.013327 | 0.034982 | -0.058270 | -0.013534 |
4 | -0.060045 | 0.000000 | -0.024965 | -0.003329 |
8 | -0.076655 | 0.008179 | 0.001519 | 0.009916 |
16 | 1.078607 | 0.021295 | -0.003446 | 0.014673 |
32 | 29.372782 | 0.196456 | 0.069270 | 0.022945 |
64 | 48.248033 | 38.910015 | 3.307871 | 0.064310 |
128 | 59.471858 | 98.102342 | 54.171595 | 19.367718 |
256 | 42.642260 | 68.820264 | 124.561784 | 70.362646 |
512 | 0.519592 | 38.370175 | 90.758295 | 120.258813 |
1024 | 4.643625 | 13.722628 | 62.837357 | 143.362800 |
Average write latency reported by fio vs. number of fio threads
Below is a graph of the average write latency (in microseconds) reported by fio (using the write.lat.mean metric), for all fio threads in the test run. Just like the graph of IOPs above, each line represents a different zpool configuration, and there's data for the "baseline" as well as the "project" branch.
Average write latency reported by fio vs. number of fio threads - baseline branch
plot_fio_latency_dataframe(master_lat_hdd)
master_lat_hdd
fio threads | baseline - 1 hdd | baseline - 2 hdd | baseline - 4 hdd | baseline - 8 hdd |
---|---|---|---|---|
1 | 4386.720697 | 3699.620116 | 4634.929778 | 9300.949023 |
2 | 7581.838951 | 4925.282754 | 4704.590820 | 5819.004461 |
4 | 11447.210895 | 7796.208655 | 7576.114443 | 7035.875409 |
8 | 13123.875513 | 7591.445740 | 7284.120111 | 7367.676871 |
16 | 14422.498594 | 9326.144167 | 8674.434834 | 8286.968943 |
32 | 20250.782469 | 12715.911801 | 10192.632968 | 9240.433543 |
64 | 23154.195952 | 21693.105609 | 15790.146192 | 13058.831704 |
128 | 29511.755590 | 31506.163454 | 24283.091051 | 18665.823596 |
256 | 46402.835204 | 35261.516869 | 38564.844366 | 29406.862271 |
512 | 68193.031212 | 51705.126769 | 49121.286463 | 47834.087059 |
1024 | 137336.580874 | 84472.640787 | 69609.612122 | 78527.746234 |
Average write latency reported by fio vs. number of fio threads - project branch
plot_fio_latency_dataframe(project_lat_hdd)
project_lat_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 4513.293031 | 4127.678986 | 4870.434266 | 9654.541612 |
2 | 5227.947541 | 4318.056393 | 4174.912262 | 5178.429621 |
4 | 6849.039282 | 5389.744533 | 4451.872463 | 4518.127472 |
8 | 8090.717031 | 6873.544714 | 5430.948839 | 5005.293451 |
16 | 8768.124963 | 8723.167266 | 6825.245127 | 5642.219142 |
32 | 12796.924515 | 10481.289350 | 9074.934536 | 6763.687209 |
64 | 14259.329574 | 12593.701803 | 11451.749913 | 9156.383185 |
128 | 18499.822620 | 14924.895262 | 15264.485818 | 12896.194477 |
256 | 32536.370185 | 20853.088543 | 17152.628998 | 17208.747745 |
512 | 67855.529156 | 37349.433450 | 25731.465659 | 21711.894166 |
1024 | 131341.864978 | 74336.177008 | 42767.358304 | 32209.249758 |
Average write latency reported by fio vs. number of fio threads - project vs. baseline
The following graph shows the percentage change in the average write latency reported by fio between the "project" and "baseline" test runs. A positive value here reflects an increase in the average write latency reported by fio when comparing the "project" to the "baseline". Thus, unlike the IOPs numbers above, a negative value here is better.
plot_fio_latency_pctchange_dataframe(pctchange_lat_hdd)
pctchange_lat_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 2.885352 | 11.570347 | 5.081080 | 3.801683 |
2 | -31.046444 | -12.328761 | -11.258759 | -11.008324 |
4 | -40.168489 | -30.867108 | -41.238052 | -35.784430 |
8 | -38.351160 | -9.456710 | -25.441251 | -32.064156 |
16 | -39.205229 | -6.465447 | -21.317697 | -31.914562 |
32 | -36.807753 | -17.573435 | -10.965748 | -26.803356 |
64 | -38.415786 | -41.946063 | -27.475340 | -29.883596 |
128 | -37.313717 | -52.628649 | -37.139445 | -30.910123 |
256 | -29.882797 | -40.861624 | -55.522629 | -41.480503 |
512 | -0.494922 | -27.764545 | -47.616466 | -54.609996 |
1024 | -4.364981 | -11.999700 | -38.561131 | -58.983606 |
asvc_t averaged across all disks in zpool vs. fio threads
Below is a graph of the asvc_t column from iostat for all disks in the zpool.
The single value shown was generated by taking 1 second samples (i.e. iostat -xn 1) for each disk across the entire runtime of the test. These samples were then averaged to produce a single asvc_t value for each disk in the zpool. That per-disk value was then averaged across all disks in the zpool, to arrive at a single asvc_t value representing all disks in the zpool.
This provides an approximation of how long it took the disks to service each individual IO. Like before, there's data for the "baseline" and the "project" branch.
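For reference, here's a sketch of how those per-disk samples could be captured during a run; only the use of iostat -xn 1 and the per-disk file names consumed by get_iostat_series() come from this report, while the collection script itself (backgrounding, sample count, etc.) is an assumption:

```python
# Hypothetical sketch: capture 1-second iostat samples for each disk in the
# pool, one output file per disk, in the layout get_iostat_series() expects.
import sh

def collect_iostat(outdir, ndisks, seconds=60):
    procs = []
    for disk in DISKS[0:ndisks]:
        out = open('{:s}/iostat-{:s}.txt'.format(outdir, disk), 'w')
        # one extended sample (-x) per second, with device names (-n)
        proc = sh.iostat('-xn', disk, 1, seconds, _out=out, _bg=True)
        procs.append((proc, out))
    for proc, out in procs:
        proc.wait()
        out.close()
```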
asvc_t averaged across all disks in zpool vs. fio threads - baseline branch
plot_iostat_dataframe(master_asvc_hdd, 'asvc_t')
master_asvc_hdd
fio threads | baseline - 1 hdd | baseline - 2 hdd | baseline - 4 hdd | baseline - 8 hdd |
---|---|---|---|---|
1 | 2.262500 | 1.998214 | 2.419298 | 3.245614 |
2 | 2.732143 | 2.048214 | 2.371930 | 2.424561 |
4 | 3.248214 | 2.410714 | 2.328070 | 2.333333 |
8 | 3.623214 | 2.183929 | 2.385965 | 2.394737 |
16 | 4.278571 | 2.735714 | 2.708772 | 2.391228 |
32 | 6.198214 | 3.569643 | 2.968421 | 2.952632 |
64 | 7.635714 | 6.001786 | 3.947368 | 3.589474 |
128 | 10.346429 | 8.373214 | 6.408772 | 4.294737 |
256 | 13.354386 | 9.538596 | 7.650877 | 5.710345 |
512 | 19.707018 | 12.085965 | 10.768966 | 7.828814 |
1024 | 29.294915 | 20.686441 | 13.416949 | 11.310000 |
asvc_t averaged across all disks in zpool vs. fio threads - project branch
plot_iostat_dataframe(project_asvc_hdd, 'asvc_t')
project_asvc_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 2.225000 | 2.071429 | 2.340351 | 3.140351 |
2 | 2.525000 | 2.107143 | 2.103509 | 2.394737 |
4 | 3.300000 | 2.475000 | 2.042105 | 2.091228 |
8 | 3.953571 | 3.051786 | 2.284211 | 2.236842 |
16 | 4.355357 | 3.682143 | 2.759649 | 2.268421 |
32 | 6.894643 | 4.353571 | 3.389474 | 2.603509 |
64 | 7.289286 | 5.453571 | 4.349123 | 3.436842 |
128 | 9.092857 | 6.155357 | 5.243860 | 4.646552 |
256 | 15.147368 | 8.680702 | 6.208772 | 5.800000 |
512 | 21.684746 | 12.773684 | 7.998276 | 6.469492 |
1024 | 26.715254 | 17.015254 | 12.715000 | 7.365574 |
%b averaged across all disks in zpool vs. fio threads
Below is a graph of the %b column from iostat for all disks in the zpool. These numbers were gathered using the same approach used for generating the asvc_t numbers above: by averaging 1 second samples for each disk, and then averaging across all disks.
This provides an approximation for how utilized the disks in the zpool were during the runtime of the fio workload.
%b averaged across all disks in zpool vs. fio threads - baseline branch
plot_iostat_dataframe(master_busy_hdd, '%b')
master_busy_hdd
fio threads | baseline - 1 hdd | baseline - 2 hdd | baseline - 4 hdd | baseline - 8 hdd |
---|---|---|---|---|
1 | 28.625000 | 13.625000 | 8.403509 | 6.105263 |
2 | 66.232143 | 26.571429 | 15.508772 | 8.596491 |
4 | 96.892857 | 44.785714 | 22.561404 | 11.982456 |
8 | 98.339286 | 50.767857 | 29.350877 | 15.157895 |
16 | 98.839286 | 57.410714 | 31.543860 | 15.473684 |
32 | 99.446429 | 73.625000 | 37.929825 | 21.175439 |
64 | 99.964286 | 90.642857 | 72.789474 | 39.596491 |
128 | 99.892857 | 98.892857 | 89.807018 | 67.245614 |
256 | 99.807018 | 99.526316 | 96.298246 | 80.775862 |
512 | 99.912281 | 99.263158 | 99.034483 | 88.440678 |
1024 | 99.898305 | 99.644068 | 99.033898 | 92.800000 |
%b averaged across all disks in zpool vs. fio threads - project branch
plot_iostat_dataframe(project_busy_hdd, '%b')
project_busy_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 28.142857 | 13.821429 | 7.982456 | 5.912281 |
2 | 51.375000 | 26.625000 | 13.649123 | 7.719298 |
4 | 83.803571 | 52.750000 | 24.789474 | 13.070175 |
8 | 97.821429 | 83.428571 | 47.912281 | 25.070175 |
16 | 99.928571 | 93.571429 | 75.877193 | 41.964912 |
32 | 100.000000 | 97.571429 | 86.385965 | 65.982456 |
64 | 99.910714 | 99.553571 | 95.070175 | 81.964912 |
128 | 99.839286 | 99.589286 | 97.701754 | 91.637931 |
256 | 99.842105 | 99.701754 | 99.157895 | 93.862069 |
512 | 99.830508 | 99.578947 | 99.051724 | 97.949153 |
1024 | 100.000000 | 99.559322 | 99.033333 | 98.557377 |
actv averaged across all disks in zpool vs. fio threads
Below is a graph of the actv column from iostat for all disks in the zpool. This provides an approximation for how many IOs the disks in the zpool were servicing at any given time.
actv averaged across all disks in zpool vs. fio threads - baseline branch
plot_iostat_dataframe(master_actv_hdd, 'actv')
master_actv_hdd
fio threads | baseline - 1 hdd | baseline - 2 hdd | baseline - 4 hdd | baseline - 8 hdd |
---|---|---|---|---|
1 | 0.314286 | 0.141071 | 0.110526 | 0.050877 |
2 | 0.735714 | 0.275000 | 0.152632 | 0.100000 |
4 | 1.114286 | 0.467857 | 0.243860 | 0.126316 |
8 | 1.180357 | 0.560714 | 0.310526 | 0.156140 |
16 | 1.441071 | 0.658929 | 0.333333 | 0.159649 |
32 | 2.107143 | 1.007143 | 0.452632 | 0.233333 |
64 | 3.375000 | 1.778571 | 0.966667 | 0.468421 |
128 | 5.289286 | 2.785714 | 1.677193 | 0.898246 |
256 | 7.456140 | 4.105263 | 2.273684 | 1.308621 |
512 | 14.222807 | 6.696491 | 3.712069 | 1.871186 |
1024 | 21.452542 | 14.066102 | 5.783051 | 2.960000 |
actv averaged across all disks in zpool vs. fio threads - project branch
plot_iostat_dataframe(project_actv_hdd, 'actv')
project_actv_hdd
fio threads | project - 1 hdd - 5% lwb timeout | project - 2 hdd - 5% lwb timeout | project - 4 hdd - 5% lwb timeout | project - 8 hdd - 5% lwb timeout |
---|---|---|---|---|
1 | 0.310714 | 0.151786 | 0.103509 | 0.057895 |
2 | 0.666071 | 0.289286 | 0.140351 | 0.100000 |
4 | 1.512500 | 0.623214 | 0.254386 | 0.128070 |
8 | 2.900000 | 1.253571 | 0.519298 | 0.245614 |
16 | 5.291071 | 2.189286 | 0.971930 | 0.443860 |
32 | 8.271429 | 3.389286 | 1.505263 | 0.784211 |
64 | 9.625000 | 4.914286 | 2.398246 | 1.231579 |
128 | 10.789286 | 6.282143 | 2.649123 | 1.867241 |
256 | 20.794737 | 10.231579 | 4.614035 | 2.141379 |
512 | 29.500000 | 18.140351 | 7.955172 | 3.876271 |
1024 | 35.916949 | 23.806780 | 15.560000 | 6.224590 |
The visualizations below are on-cpu flame-graphs of the entire system, using kernel level stacks. Unlike the line graphs above, there isn't a straightforward way to condense all of the test runs into a single flame-graph visualization. Thus, instead of showing the unique graph for each configuration (there are 40 total configurations), 2 configurations were specifically chosen in the hope that they show a representative sample of the whole population. The two chosen configurations are: 1 disk with 1024 fio threads, and 8 disks with 1024 fio threads.
Both configurations have the largest number of fio threads available; then one configuration has the largest number of disks, and the other has the least number of disks.
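For context, one common way to capture this kind of on-cpu, kernel-stack flame graph on an illumos system is DTrace's profile provider combined with Brendan Gregg's FlameGraph scripts; the sketch below is an assumption about the methodology, not the exact commands used for this report:

```python
# Hypothetical sketch: sample kernel stacks system-wide and render them as a
# flame graph. Assumes dtrace, stackcollapse.pl, and flamegraph.pl are in PATH.
import sh

def capture_kernel_flamegraph(svg_path='flamegraph.svg'):
    # sample at 997 Hz for 60 seconds; /arg0/ keeps only kernel-context samples
    prog = 'profile-997 /arg0/ { @[stack()] = count(); } tick-60s { exit(0); }'
    sh.dtrace('-x', 'stackframes=100', '-n', prog, '-o', 'out.kern_stacks')
    with open('out.folded', 'w') as folded:
        sh.Command('stackcollapse.pl')('out.kern_stacks', _out=folded)
    with open(svg_path, 'w') as svg:
        sh.Command('flamegraph.pl')('out.folded', _out=svg)
```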
[Flame graph images omitted: 1024 fio threads - baseline branch, and 1024 fio threads - project branch - 5% timeout, for each of the two chosen configurations.]
The graphs below contain data about the size of the ZIL blocks (lwbs) issued to disk, and the amount of space within each lwb that was used. For each lwb issued to disk, there is one point representing the size of the lwb, and one representing the amount of used space within that lwb.
This information provides insight into how the lwb block size selection algorithm behaves and how well the lwb buffer is utilized, both of which can be critical for achieving maximal performance from the underlying storage.
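As a small supplementary sketch (not part of the original analysis), the same per-lwb data files plotted below can also be summarized numerically, e.g. to get the average fraction of each lwb that was actually used:

```python
# Supplementary sketch: summarize lwb utilization from the same
# dtrace-zil-lwb-points.txt files used by plot_master_lwb_points().
def summarize_lwb_utilization(results, numjobs):
    df = pd.read_csv('{:s}/fio-{:d}-jobs/dtrace-zil-lwb-points.txt'.format(results, numjobs),
                     delim_whitespace=True)
    return pd.Series({'mean lwb size (bytes)': df['size'].mean(),
                      'mean lwb used (bytes)': df['used'].mean(),
                      'mean utilization': (df['used'] / df['size']).mean()})
```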
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 1, 'hdd')
title = 'baseline - {:d} {:s}'.format(1, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 1, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 1, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 2, 'hdd')
title = 'baseline - {:d} {:s}'.format(2, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 2, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 2, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 4, 'hdd')
title = 'baseline - {:d} {:s}'.format(4, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 4, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 4, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 8, 'hdd')
title = 'baseline - {:d} {:s}'.format(8, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 8, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 8, 'hdd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 1, 'hdd')
title = 'baseline - {:d} {:s}'.format(1, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 1, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 1, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 2, 'hdd')
title = 'baseline - {:d} {:s}'.format(2, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 2, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 2, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 4, 'hdd')
title = 'baseline - {:d} {:s}'.format(4, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 4, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 4, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 8, 'hdd')
title = 'baseline - {:d} {:s}'.format(8, 'hdd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 8, 'hdd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 8, 'hdd')
plot_master_lwb_points('histogram', results, title)
What follows is an overview of the performance information gathered from running the fio workload described above, on zpools comprised of solid state drives, using the "SSD" system configuration. Just like above, there will be results for the "baseline" branch of the os-gate (again, commit d99a4e8b5c of the "master" branch), as well as my custom "project" branch.
All metrics below match the identical metrics shown in the graphs above; see the "HDD" sections for explanations of exactly what each metric means and how it was generated.
Write IOPs reported by fio vs. number of fio threads
Write IOPs reported by fio vs. number of fio threads - baseline branch
plot_fio_iops_dataframe(master_iops_ssd)
master_iops_ssd
fio threads | baseline - 1 ssd | baseline - 2 ssd | baseline - 4 ssd | baseline - 8 ssd |
---|---|---|---|---|
1 | 65.01 | 65.01 | 65.01 | 65.01 |
2 | 127.86 | 127.87 | 127.86 | 127.87 |
4 | 253.52 | 253.52 | 253.52 | 253.52 |
8 | 507.24 | 507.28 | 507.25 | 507.28 |
16 | 1021.53 | 1021.70 | 1021.53 | 1021.70 |
32 | 2049.10 | 2049.08 | 2049.08 | 2049.10 |
64 | 4100.60 | 4100.60 | 4100.60 | 4100.37 |
128 | 8191.46 | 8191.44 | 8191.46 | 8191.46 |
256 | 16375.18 | 16374.20 | 16375.10 | 16375.22 |
512 | 32745.93 | 32746.34 | 32742.47 | 32745.64 |
1024 | 52419.33 | 65421.99 | 65422.03 | 65392.61 |
Write IOPs reported by fio vs. number of fio threads - project branch
plot_fio_iops_dataframe(project_iops_ssd)
project_iops_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 65.01 | 65.01 | 65.01 | 65.01 |
2 | 127.87 | 127.86 | 127.87 | 127.87 |
4 | 253.52 | 253.52 | 253.52 | 253.52 |
8 | 507.28 | 507.28 | 507.28 | 507.28 |
16 | 1021.70 | 1021.63 | 1021.70 | 1021.70 |
32 | 2049.10 | 2049.10 | 2049.08 | 2049.10 |
64 | 4100.62 | 4100.60 | 4100.60 | 4100.60 |
128 | 8191.46 | 8191.48 | 8191.46 | 8191.48 |
256 | 16375.27 | 16374.70 | 16375.28 | 16374.27 |
512 | 32747.09 | 32746.61 | 32746.57 | 32747.22 |
1024 | 51292.64 | 65250.61 | 65400.50 | 65396.73 |
Write IOPs reported by fio vs. number of fio threads - project vs. baseline
plot_fio_iops_pctchange_dataframe(pctchange_iops_ssd)
pctchange_iops_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 0.000000 | 0.000000 | 0.000000 | 0.000000 |
2 | 0.007821 | -0.007820 | 0.007821 | 0.000000 |
4 | 0.000000 | 0.000000 | 0.000000 | 0.000000 |
8 | 0.007886 | 0.000000 | 0.005914 | 0.000000 |
16 | 0.016642 | -0.006851 | 0.016642 | 0.000000 |
32 | 0.000000 | 0.000976 | 0.000000 | 0.000000 |
64 | 0.000488 | 0.000000 | 0.000000 | 0.005609 |
128 | 0.000000 | 0.000488 | 0.000000 | 0.000244 |
256 | 0.000550 | 0.003054 | 0.001099 | -0.005801 |
512 | 0.003542 | 0.000825 | 0.012522 | 0.004825 |
1024 | -2.149379 | -0.261961 | -0.032909 | 0.006300 |
Average write latency reported by fio vs. number of fio threads
Average write latency reported by fio vs. number of fio threads - baseline branch
plot_fio_latency_dataframe(master_lat_ssd)
master_lat_ssd
fio threads | baseline - 1 ssd | baseline - 2 ssd | baseline - 4 ssd | baseline - 8 ssd |
---|---|---|---|---|
1 | 257.58 | 254.43 | 293.21 | 303.27 |
2 | 241.66 | 252.51 | 265.58 | 274.95 |
4 | 246.68 | 241.13 | 242.15 | 256.56 |
8 | 274.03 | 298.88 | 262.69 | 260.08 |
16 | 263.38 | 349.07 | 272.74 | 287.69 |
32 | 307.75 | 319.63 | 318.71 | 363.31 |
64 | 376.20 | 414.50 | 441.47 | 453.56 |
128 | 540.04 | 570.09 | 600.39 | 625.05 |
256 | 821.17 | 899.02 | 956.72 | 976.80 |
512 | 2373.29 | 2091.83 | 2368.13 | 2418.09 |
1024 | 19003.94 | 8109.62 | 7553.70 | 8320.33 |
Average write latency reported by fio vs. number of fio threads - project branch
plot_fio_latency_dataframe(project_lat_ssd)
project_lat_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 297.22 | 313.84 | 312.39 | 295.73 |
2 | 231.64 | 240.55 | 246.50 | 279.11 |
4 | 238.39 | 267.77 | 296.34 | 256.68 |
8 | 229.66 | 241.40 | 234.98 | 263.74 |
16 | 238.10 | 244.81 | 264.91 | 260.19 |
32 | 256.98 | 274.18 | 279.38 | 297.84 |
64 | 288.16 | 304.73 | 332.57 | 370.44 |
128 | 371.59 | 459.10 | 421.14 | 473.20 |
256 | 614.07 | 654.27 | 703.03 | 665.63 |
512 | 1535.48 | 1017.09 | 1414.45 | 1501.32 |
1024 | 18454.89 | 4912.91 | 7393.03 | 6349.89 |
Average write latency reported by fio vs. number of fio threads - project vs. baseline
plot_fio_latency_pctchange_dataframe(pctchange_lat_ssd)
pctchange_lat_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 15.389394 | 23.350234 | 6.541387 | -2.486233 |
2 | -4.146321 | -4.736446 | -7.184276 | 1.513002 |
4 | -3.360629 | 11.047982 | 22.378691 | 0.046773 |
8 | -16.191658 | -19.231799 | -10.548555 | 1.407259 |
16 | -9.598299 | -29.867935 | -2.870866 | -9.558900 |
32 | -16.497157 | -14.219566 | -12.340372 | -18.020423 |
64 | -23.402446 | -26.482509 | -24.667588 | -18.326131 |
128 | -31.192134 | -19.468856 | -29.855594 | -24.294056 |
256 | -25.220113 | -27.224088 | -26.516640 | -31.856061 |
512 | -35.301628 | -51.377980 | -40.271438 | -37.912981 |
1024 | -2.889138 | -39.418740 | -2.127037 | -23.682234 |
asvc_t averaged across all disks in zpool vs. fio threads
asvc_t averaged across all disks in zpool vs. fio threads - baseline branch
plot_iostat_dataframe(master_asvc_ssd, 'asvc_t')
master_asvc_ssd
fio threads | baseline - 1 ssd | baseline - 2 ssd | baseline - 4 ssd | baseline - 8 ssd |
---|---|---|---|---|
1 | 0.003571 | 0.016071 | 0.014286 | 0.015789 |
2 | 0.010714 | 0.019643 | 0.025000 | 0.017544 |
4 | 0.008929 | 0.007143 | 0.007143 | 0.022807 |
8 | 0.003571 | 0.007143 | 0.003509 | 0.010526 |
16 | 0.001786 | 0.014286 | 0.003509 | 0.010526 |
32 | 0.005357 | 0.007143 | 0.000000 | 0.003509 |
64 | 0.003571 | 0.005357 | 0.001754 | 0.007018 |
128 | 0.014286 | 0.021053 | 0.007018 | 0.015789 |
256 | 0.100000 | 0.101754 | 0.100000 | 0.101724 |
512 | 0.206897 | 0.201724 | 0.101724 | 0.105085 |
1024 | 1.333333 | 0.408333 | 0.203333 | 0.183607 |
asvc_t averaged across all disks in zpool vs. fio threads - project branch
plot_iostat_dataframe(project_asvc_ssd, 'asvc_t')
project_asvc_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 0.014286 | 0.033929 | 0.012500 | 0.010526 |
2 | 0.001786 | 0.008929 | 0.014286 | 0.047368 |
4 | 0.007143 | 0.030357 | 0.049123 | 0.024561 |
8 | 0.001786 | 0.016071 | 0.001754 | 0.012281 |
16 | 0.007143 | 0.021429 | 0.014035 | 0.017544 |
32 | 0.001786 | 0.014286 | 0.000000 | 0.003509 |
64 | 0.000000 | 0.007143 | 0.007018 | 0.005263 |
128 | 0.014286 | 0.028571 | 0.015789 | 0.018966 |
256 | 0.101754 | 0.112281 | 0.067241 | 0.094828 |
512 | 0.115517 | 0.106897 | 0.103448 | 0.110169 |
1024 | 1.193333 | 0.215000 | 0.109836 | 0.114754 |
%b averaged across all disks in zpool vs. fio threads
%b averaged across all disks in zpool vs. fio threads - baseline branch
plot_iostat_dataframe(master_busy_ssd, '%b')
master_busy_ssd
fio threads | baseline - 1 ssd | baseline - 2 ssd | baseline - 4 ssd | baseline - 8 ssd |
---|---|---|---|---|
1 | 0.464286 | 0.107143 | 0.000000 | 0.000000 |
2 | 1.089286 | 0.375000 | 0.107143 | 0.000000 |
4 | 1.910714 | 1.071429 | 0.303571 | 0.035088 |
8 | 3.696429 | 2.017857 | 1.035088 | 0.245614 |
16 | 7.160714 | 4.071429 | 1.807018 | 1.052632 |
32 | 14.053571 | 7.267857 | 3.403509 | 1.473684 |
64 | 23.035714 | 11.910714 | 5.877193 | 3.228070 |
128 | 36.607143 | 19.315789 | 9.719298 | 5.368421 |
256 | 67.473684 | 48.385965 | 22.877193 | 14.034483 |
512 | 78.551724 | 73.810345 | 46.655172 | 31.355932 |
1024 | 93.983333 | 75.800000 | 57.633333 | 39.836066 |
%b averaged across all disks in zpool vs. fio threads - project branch
plot_iostat_dataframe(project_busy_ssd, '%b')
project_busy_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 0.517857 | 0.303571 | 0.000000 | 0.000000 |
2 | 1.035714 | 0.339286 | 0.053571 | 0.087719 |
4 | 2.125000 | 1.410714 | 0.736842 | 0.105263 |
8 | 3.571429 | 2.053571 | 1.017544 | 0.298246 |
16 | 6.892857 | 3.857143 | 1.947368 | 1.140351 |
32 | 13.750000 | 7.500000 | 3.438596 | 1.947368 |
64 | 25.535714 | 14.125000 | 6.631579 | 3.701754 |
128 | 47.160714 | 27.660714 | 12.824561 | 6.965517 |
256 | 84.771930 | 66.263158 | 31.517241 | 19.293103 |
512 | 91.396552 | 82.965517 | 52.603448 | 35.288136 |
1024 | 94.300000 | 82.983333 | 55.508197 | 36.327869 |
actv averaged across all disks in zpool vs. fio threads
actv averaged across all disks in zpool vs. fio threads - baseline branch
plot_iostat_dataframe(master_actv_ssd, 'actv')
master_actv_ssd
fio threads | baseline - 1 ssd | baseline - 2 ssd | baseline - 4 ssd | baseline - 8 ssd |
---|---|---|---|---|
1 | 0.000000 | 0.000000 | 0.000000 | 0.000000 |
2 | 0.000000 | 0.000000 | 0.001786 | 0.000000 |
4 | 0.001786 | 0.000000 | 0.000000 | 0.000000 |
8 | 0.008929 | 0.000000 | 0.000000 | 0.000000 |
16 | 0.101786 | 0.026786 | 0.000000 | 0.000000 |
32 | 0.137500 | 0.100000 | 0.000000 | 0.000000 |
64 | 0.255357 | 0.108929 | 0.078947 | 0.001754 |
128 | 0.417857 | 0.198246 | 0.101754 | 0.056140 |
256 | 0.949123 | 0.592982 | 0.249123 | 0.156897 |
512 | 1.827586 | 1.144828 | 0.563793 | 0.338983 |
1024 | 15.230000 | 2.295000 | 0.918333 | 0.477049 |
actv averaged across all disks in zpool vs. fio threads - project branch
plot_iostat_dataframe(project_actv_ssd, 'actv')
project_actv_ssd
fio threads | project - 1 ssd - 5% lwb timeout | project - 2 ssd - 5% lwb timeout | project - 4 ssd - 5% lwb timeout | project - 8 ssd - 5% lwb timeout |
---|---|---|---|---|
1 | 0.001786 | 0.000000 | 0.000000 | 0.000000 |
2 | 0.000000 | 0.000000 | 0.000000 | 0.000000 |
4 | 0.001786 | 0.005357 | 0.005263 | 0.000000 |
8 | 0.012500 | 0.003571 | 0.000000 | 0.000000 |
16 | 0.100000 | 0.025000 | 0.003509 | 0.000000 |
32 | 0.142857 | 0.112500 | 0.003509 | 0.000000 |
64 | 0.291071 | 0.182143 | 0.110526 | 0.005263 |
128 | 0.662500 | 0.389286 | 0.150877 | 0.118966 |
256 | 2.029825 | 1.242105 | 0.436207 | 0.237931 |
512 | 3.663793 | 1.605172 | 0.681034 | 0.450847 |
1024 | 18.405000 | 2.405000 | 0.806557 | 0.454098 |
[Flame graph images omitted: 1024 fio threads - baseline branch, and 1024 fio threads - project branch - 5% timeout, for each of the two chosen SSD configurations.]
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 1, 'ssd')
title = 'baseline - {:d} {:s}'.format(1, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 1, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 1, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 2, 'ssd')
title = 'baseline - {:d} {:s}'.format(2, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 2, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 2, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 4, 'ssd')
title = 'baseline - {:d} {:s}'.format(4, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 4, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 4, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 8, 'ssd')
title = 'baseline - {:d} {:s}'.format(8, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 8, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 8, 'ssd')
plot_master_lwb_points('scatter', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 1, 'ssd')
title = 'baseline - {:d} {:s}'.format(1, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 1, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 1, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 2, 'ssd')
title = 'baseline - {:d} {:s}'.format(2, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 2, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 2, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 4, 'ssd')
title = 'baseline - {:d} {:s}'.format(4, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 4, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 4, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-master/{:d}-{:s}/default'.format(TEST, 8, 'ssd')
title = 'baseline - {:d} {:s}'.format(8, 'ssd')
plot_master_lwb_points('histogram', results, title)
results = 'zfs-workload/{:s}/results-project/{:d}-{:s}/{:d}pct-timeout'.format(TEST, 8, 'ssd', TIMEOUT)
title = 'project - {:d}% timeout - {:d} {:s}'.format(TIMEOUT, 8, 'ssd')
plot_master_lwb_points('histogram', results, title)