#!/bin/bash
# This script has numerous Bashisms, and is assumed to be running on a recent
# Linux system (certainly, Kernel 2.6+).
# Assume that we're generally interested in re-initializing data;
# it may be useful to turn off REMAKE_DATA for repeated runs.
BASEDIR=$(dirname "$0")
BASEDIR=$(cd "$BASEDIR"; pwd)
. "$BASEDIR/pgtpch_defaults"
# Current time
t=$(timer)
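# ('timer' is presumably a helper defined in pgtpch_defaults: with no argument
# it emits a start timestamp, and with that timestamp as an argument, as on
# the last line of this script, the elapsed time.)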
# Initialize DB if $PGDATADIR does not exist
if ! [ -d "$PGDATADIR" ]; then
sudo -u $PGUSER mkdir -p "$PGDATADIR"
sudo -u $PGUSER $PGBINDIR/initdb -D "$PGDATADIR" --encoding=UTF-8 --locale=C
CREATE_NEW_DB=true
else
CREATE_NEW_DB=false
fi
# Start a new instance of Postgres
sudo -u $PGUSER $PGBINDIR/postgres -D "$PGDATADIR" -p $PGPORT &
PGPID=$!
while ! sudo -u $PGUSER $PGBINDIR/pg_ctl status -D "$PGDATADIR" | grep -q "server is running"; do
echo "Waiting for the Postgres server to start"
sleep 2
done
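# (A possible alternative readiness check, assuming PostgreSQL 9.3+, which
# ships pg_isready:
#   sudo -u $PGUSER $PGBINDIR/pg_isready -h /tmp -p $PGPORT)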
if $CREATE_NEW_DB; then
sudo -u $PGUSER $PGBINDIR/createdb -h /tmp -p $PGPORT $PGUSER --encoding=UTF-8 --locale=C
fi
WAL_LEVEL_MINIMAL=$(sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -c 'show wal_level' -t | grep -c minimal)
DEBUG_ASSERTIONS=$(sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -c 'show debug_assertions' -t | grep -c on)
# Make an unmatched *.tbl glob expand to nothing rather than the literal '*.tbl'
shopt -s nullglob
# Remind me of my current settings
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -c "select name, current_setting(name) from pg_settings where name
in('debug_assertions', 'wal_level', 'checkpoint_segments', 'shared_buffers', 'wal_buffers',
'fsync', 'maintenance_work_mem', 'checkpoint_completion_target',
'max_connections');"
if [ "$WAL_LEVEL_MINIMAL" -ne 1 ]; then
echo "Warning: Postgres wal_level is not set to minimal; 'Elide WAL traffic' optimization cannot be used" >&2
fi
if [ "$DEBUG_ASSERTIONS" -eq 1 ]; then
echo "Error: debug_assertions are enabled" >&2
exit 1
fi
cd "$BASEDIR/dbgen"
if ! [ -x dbgen ] || ! [ -x qgen ];
then
make -j $CORES
fi
sudo -u $PGUSER mkdir -p "$TPCHTMP" || die "Failed to create temporary directory: '$TPCHTMP'"
if $REMAKE_DATA
then
cd "$TPCHTMP"
sudo -u $PGUSER cp "$BASEDIR/dbgen/dists.dss" .
# Run dbgen with "force", to overwrite existing files
# Create table files separately to have better IO throughput
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T c &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T s &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T n &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T r &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T O &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T L &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T P &
sudo -u $PGUSER "$BASEDIR/dbgen/dbgen" -s $SCALE -f -v -T S &
fi
# Wait for all pending jobs to finish.
for p in $(jobs -p);
do
if [ $p != $PGPID ];
then
wait $p
fi
done
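# The settings below relax checkpointing and enlarge working memory while the
# bulk load runs; the absolute values assume a large-memory machine (note
# shared_buffers = 32GB) and should be scaled to the host.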
data_loading_configuration="
checkpoint_segments = 300
checkpoint_timeout = 3600s
checkpoint_completion_target = 0.9
shared_buffers = 32GB
maintenance_work_mem = 1GB
"
logging_configuration="
log_destination = 'stderr'
logging_collector = on
log_directory = 'pg_log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_file_mode = 0600
client_min_messages = notice
log_min_messages = warning
log_min_error_statement = error
log_statement = 'all'
log_timezone = 'Europe/Madrid'
"
if $POPULATE_DB
then
echo "DROP DATABASE IF EXISTS $DB_NAME" | sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT
# Make sure we're all on the same page wrt encoding, collations, etc.
sudo -u $PGUSER $PGBINDIR/createdb -h /tmp -p $PGPORT $DB_NAME --encoding=UTF-8 --locale=C
if [ $? != 0 ]; then
# Did you forget to disconnect from the database before dropping?
echo "Error: Can't proceed without database" >&2
exit 1
fi
TIME=$(date)
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "comment on database $DB_NAME is 'TPC-H data, created at $TIME'"
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME < "$BASEDIR/dbgen/dss.ddl"
# Configuration parameters for efficient data loading
echo "$data_loading_configuration" | sudo -u $PGUSER tee -a $PGDATADIR/postgresql.conf
if $LOGGING
then
echo "$logging_configuration" | sudo -u $PGUSER tee -a $PGDATADIR/postgresql.conf
fi
sudo -u $PGUSER $PGBINDIR/pg_ctl reload -D $PGDATADIR
cd "$TPCHTMP"
for f in *.tbl; do
bf="$(basename $f .tbl)"
# We truncate the empty table in the same transaction as the COPY, to enable
# Postgres to safely skip WAL-logging. See
# http://www.postgresql.org/docs/current/static/populate.html#POPULATE-PITR
echo "truncate $bf; COPY $bf FROM '$(pwd)/$f' WITH DELIMITER AS '|'" | sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME &
done
# TODO: It would be nice if there were a way to limit the number of
# concurrently executing jobs to $CORES. It is surprisingly easy to make COPY
# CPU-bound. One possible approach is sketched below.
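# A minimal sketch of such throttling, assuming bash 4.3+ for 'wait -n': place
# this before backgrounding each COPY above, so at most $CORES jobs run at
# once (note the Postgres server itself counts as one of the running jobs):
#   while [ "$(jobs -pr | wc -l)" -ge "$CORES" ]; do wait -n; done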
for p in $(jobs -p); do
if [ $p == $PGPID ]; then continue; fi
wait $p;
done
# Create primary and foreign keys
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME < "$BASEDIR/dbgen/dss.ri"
fi
cd "$BASEDIR"
sudo -u $PGUSER rm -rf "$TPCHTMP"
# Since wal_level is hopefully set to 'minimal', it ought to be possible to
# skip WAL-logging for these CREATE INDEX operations, too.
if $CREATE_ALL_INDEXES
then
echo "Creating 'All' indexes..."
# Primary key definitions already create indexes
#sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_r_regionkey ON region (r_regionkey);" &
#sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_n_nationkey ON nation (n_nationkey);" &
#sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_p_partkey ON part (p_partkey);" &
#sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_s_suppkey ON supplier (s_suppkey);" &
#sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_c_custkey ON customer (c_custkey);" &
#sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_o_orderkey ON orders (o_orderkey);" &
# Pg does not create indexes on foreign keys, so create them manually
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_n_regionkey ON nation (n_regionkey);" & # not used on 1GB
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_s_nationkey ON supplier (s_nationkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_c_nationkey ON customer (c_nationkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_ps_suppkey ON partsupp (ps_suppkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_ps_partkey ON partsupp (ps_partkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_o_custkey ON orders (o_custkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_orderkey ON lineitem (l_orderkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_suppkey_partkey ON lineitem (l_partkey, l_suppkey);" &
# other indexes
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_shipdate ON lineitem (l_shipdate);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_partkey ON lineitem (l_partkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_suppkey ON lineitem (l_suppkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_receiptdate ON lineitem (l_receiptdate);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_orderkey_quantity ON lineitem (l_orderkey, l_quantity);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_o_orderdate ON orders (o_orderdate);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX i_l_commitdate ON lineitem (l_commitdate);" & # not used on 1GB
fi
if $CREATE_MIN_INDEXES
then
echo "Creating 'Min' indexes..."
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX n_nationkey_idx on nation (n_nationkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX r_regionkey_idx on region (r_regionkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX p_partkey_idx on part (p_partkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX s_suppkey_idx on supplier (s_suppkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX ps_partkey_idx on partsupp (ps_partkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX c_custkey_idx on customer (c_custkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX o_orderkey_idx on orders (o_orderkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX l_orderkey_idx on lineitem (l_orderkey);" &
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "CREATE INDEX l_partkey_idx on lineitem (l_partkey);" &
fi
for p in $(jobs -p); do
if [ $p == $PGPID ]; then continue; fi
wait $p;
done
# Configuration for query execution
query_configuration="
checkpoint_timeout = 600s
work_mem = 12GB #2 to 16GB
effective_cache_size = 192GB #3/4 of total RAM (192GB)
default_statistics_target = 5000 #requires analyze to take effect
maintenance_work_mem = 32MB
"
echo "$query_configuration" | sudo -u $PGUSER tee -a $PGDATADIR/postgresql.conf
sudo -u $PGUSER $PGBINDIR/pg_ctl reload -D $PGDATADIR
# Disable transparent huge pages
echo never | sudo tee /sys/kernel/mm/transparent_hugepage/defrag
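# (Many kernels expose an 'enabled' knob alongside 'defrag'; disabling both is
# a common companion step, assuming the sysfs path exists:
#   echo never | sudo tee /sys/kernel/mm/transparent_hugepage/enabled)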
# Always analyze after bulk-loading; when hacking on Postgres, it is typically
# run with autovacuum turned off.
echo "Running vacuum freeze analyze..."
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "vacuum freeze"
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "analyze"
# Checkpoint, so we have a "clean slate". Just in case.
echo "Checkpointing..."
sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -c "checkpoint"
cd "$BASEDIR/dbgen"
mkdir -p "../queries"
for i in $(seq 1 22); do
ii=$(printf "%02d" $i)
DSS_QUERY=queries ./qgen $i >../queries/q$ii.sql
sed 's/^select/explain select/' ../queries/q$ii.sql > ../queries/q$ii.explain.sql
sed 's/^select/explain analyze select/' ../queries/q$ii.sql > ../queries/q$ii.analyze.sql
done
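# The generated queries can then be run individually, for example (an
# illustrative invocation, not the only way):
#   sudo -u $PGUSER $PGBINDIR/psql -h /tmp -p $PGPORT -d $DB_NAME -f ../queries/q01.sql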
printf 'Elapsed time: %s\n' $(timer $t)