package com.liferay.portal.tools.samplesqlbuilder;

import com.liferay.portal.dao.db.MySQLDB;
import com.liferay.portal.freemarker.FreeMarkerUtil;
import com.liferay.portal.kernel.dao.db.DB;
import com.liferay.portal.kernel.dao.db.DBFactoryUtil;
import com.liferay.portal.kernel.io.CharPipe;
import com.liferay.portal.kernel.io.OutputStreamWriter;
import com.liferay.portal.kernel.io.unsync.UnsyncBufferedReader;
import com.liferay.portal.kernel.io.unsync.UnsyncBufferedWriter;
import com.liferay.portal.kernel.io.unsync.UnsyncTeeWriter;
import com.liferay.portal.kernel.util.FileUtil;
import com.liferay.portal.kernel.util.GetterUtil;
import com.liferay.portal.kernel.util.ListUtil;
import com.liferay.portal.kernel.util.StringBundler;
import com.liferay.portal.kernel.util.StringPool;
import com.liferay.portal.tools.ArgumentsUtil;
import com.liferay.portal.util.InitUtil;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;

import java.nio.channels.FileChannel;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

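/**
 * Builds sample SQL and CSV data for portal benchmarking. The builder reads
 * the "sample.sql.*" arguments, renders the sample FreeMarker template,
 * batches the generated insert statements per table, and optionally merges
 * everything into a single SQL file for the configured database type.
 */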
public class SampleSQLBuilder {

	public static void main(String[] args) {
		Map<String, String> arguments = ArgumentsUtil.parseArguments(args);

		List<String> extraConfigLocations = new ArrayList<String>();

		extraConfigLocations.add("META-INF/portlet-container-spring.xml");

		InitUtil.initWithSpring(false, extraConfigLocations);

		try {
			new SampleSQLBuilder(arguments);
		}
		catch (Exception e) {
			e.printStackTrace();
		}
	}

	public SampleSQLBuilder(Map<String, String> arguments) throws Exception {
		String baseDir = arguments.get("sample.sql.base.dir");

		_dbType = arguments.get("sample.sql.db.type");
		_maxAssetCategoryCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.asset.category.count"));
		_maxBlogsEntryCommentCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.blogs.entry.comment.count"));
		_maxBlogsEntryCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.blogs.entry.count"));
		_maxDDLRecordCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.ddl.record.count"));
		_maxDDLRecordSetCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.ddl.record.set.count"));
		_maxDLFileEntryCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.dl.file.entry.count"));
		_maxDLFileEntrySize = GetterUtil.getInteger(
			arguments.get("sample.sql.max.dl.file.entry.size"));
		_maxDLFolderCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.dl.folder.count"));
		_maxDLFolderDepth = GetterUtil.getInteger(
			arguments.get("sample.sql.max.dl.folder.depth"));
		_maxGroupCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.group.count"));
		_maxJournalArticleCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.journal.article.count"));
		_maxJournalArticlePageCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.journal.article.page.count"));
		_maxJournalArticleSize = GetterUtil.getInteger(
			arguments.get("sample.sql.max.journal.article.size"));
		_maxJournalArticleVersionCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.journal.article.version.count"));
		_maxMBCategoryCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.mb.category.count"));
		_maxMBMessageCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.mb.message.count"));
		_maxMBThreadCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.mb.thread.count"));
		_maxUserCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.user.count"));
		_maxUserToGroupCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.user.to.group.count"));
		_maxWikiNodeCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.wiki.node.count"));
		_maxWikiPageCommentCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.wiki.page.comment.count"));
		_maxWikiPageCount = GetterUtil.getInteger(
			arguments.get("sample.sql.max.wiki.page.count"));
		_optimizeBufferSize = GetterUtil.getInteger(
			arguments.get("sample.sql.optimize.buffer.size"));
		_outputDir = arguments.get("sample.sql.output.dir");
		_outputMerge = GetterUtil.getBoolean(
			arguments.get("sample.sql.output.merge"));

		_dataFactory = new DataFactory(
			baseDir, _maxAssetCategoryCount, _maxBlogsEntryCount,
			_maxGroupCount, _maxJournalArticleCount, _maxJournalArticleSize,
			_maxMBCategoryCount, _maxMBThreadCount, _maxMBMessageCount,
			_maxUserToGroupCount);

		_db = DBFactoryUtil.getDB(_dbType);

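		// Use the sample-specific MySQL implementation when the target
		// database is MySQL
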
		if (_db instanceof MySQLDB) {
			_db = new SampleMySQLDB();
		}

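		// Clean up output from previous runs
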
		FileUtil.delete(_outputDir + "/sample-" + _dbType + ".sql");
		FileUtil.deltree(_outputDir + "/output");

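		// Stage the per-table SQL files in a temporary directory
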
		_tempDir = new File(_outputDir, "temp");

		_tempDir.mkdirs();

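		// The generator thread writes SQL into this pipe while this thread
		// consumes and compresses it
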
		final CharPipe charPipe = new CharPipe(_PIPE_BUFFER_SIZE);

		generateSQL(charPipe);

		try {

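			// Drain the generated SQL from the pipe, batching the insert
			// statements per table
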
			compressSQL(charPipe.getReader());

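			// Merge the per-table SQL files into the final output
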
			mergeSQL();
		}
		finally {
			FileUtil.deltree(_tempDir);
		}

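		// Record the effective sample.sql.* settings for reference
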
		StringBundler sb = new StringBundler();

		List<String> keys = ListUtil.fromMapKeys(arguments);

		Collections.sort(keys);

		for (String key : keys) {
			if (!key.startsWith("sample.sql")) {
				continue;
			}

			String value = arguments.get(key);

			sb.append(key);
			sb.append(StringPool.EQUAL);
			sb.append(value);
			sb.append(StringPool.NEW_LINE);
		}

		FileUtil.write(
			new File(_outputDir, "benchmarks-actual.properties"),
			sb.toString());
	}

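	/**
	 * Appends the values of a single insert statement to the multi-row insert
	 * being accumulated for that statement's table, and flushes the batch to
	 * the table's SQL file once it reaches the optimize buffer size.
	 */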
	protected void compressInsertSQL(String insertSQL) throws IOException {
		String tableName = insertSQL.substring(0, insertSQL.indexOf(' '));

		int pos = insertSQL.indexOf(" values ") + 8;

		String values = insertSQL.substring(pos, insertSQL.length() - 1);

		StringBundler sb = _insertSQLs.get(tableName);

		if ((sb == null) || (sb.index() == 0)) {
			sb = new StringBundler();

			_insertSQLs.put(tableName, sb);

			sb.append("insert into ");
			sb.append(insertSQL.substring(0, pos));
			sb.append("\n");
		}
		else {
			sb.append(",\n");
		}

		sb.append(values);

		if (sb.index() >= _optimizeBufferSize) {
			sb.append(";\n");

			String sql = _db.buildSQL(sb.toString());

			sb.setIndex(0);

			writeToInsertSQLFile(tableName, sql);
		}
	}

	protected void compressSQL(Reader reader) throws IOException {
		UnsyncBufferedReader unsyncBufferedReader = new UnsyncBufferedReader(
			reader);

		String s = null;

		while ((s = unsyncBufferedReader.readLine()) != null) {
			s = s.trim();

			if (s.length() > 0) {
				if (s.startsWith("insert into ")) {
					compressInsertSQL(s.substring(12));
				}
				else {
					_otherSQLs.add(s);
				}
			}
		}

		unsyncBufferedReader.close();
	}

	protected Writer createFileWriter(File file) throws IOException {
		FileOutputStream fileOutputStream = new FileOutputStream(file);

		Writer writer = new OutputStreamWriter(fileOutputStream);

		return createUnsyncBufferedWriter(writer);
	}

	protected Writer createFileWriter(String fileName) throws IOException {
		File file = new File(fileName);

		return createFileWriter(file);
	}

	protected Writer createUnsyncBufferedWriter(Writer writer) {
		return new UnsyncBufferedWriter(writer, _WRITER_BUFFER_SIZE) {

			@Override
			public void flush() {

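				// Intentionally a no-op so that callers cannot force small,
				// frequent flushes; buffered content is still written out when
				// the writer is closed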
			}

		};
	}

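	/**
	 * Renders the sample template on a background thread, teeing the generated
	 * SQL to both the pipe (consumed by {@link #compressSQL(Reader)}) and an
	 * uncompressed sample.sql file in the output directory.
	 */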
	protected void generateSQL(final CharPipe charPipe) {
		final Writer writer = createUnsyncBufferedWriter(charPipe.getWriter());

		Thread thread = new Thread() {

			@Override
			public void run() {
				try {
					_writerSampleSQL = new UnsyncTeeWriter(
						writer, createFileWriter(_outputDir + "/sample.sql"));

					createSample();

					_writerSampleSQL.close();

					charPipe.close();
				}
				catch (Exception e) {
					e.printStackTrace();
				}
			}

			protected void createSample() throws Exception {
				_writerBlogsCSV = getWriter("blogs.csv");
				_writerCompanyCSV = getWriter("company.csv");
				_writerDocumentLibraryCSV = getWriter("document_library.csv");
				_writerDynamicDataListsCSV = getWriter(
					"dynamic_data_lists.csv");
				_writerLayoutCSV = getWriter("layout.csv");
				_writerMessageBoardsCSV = getWriter("message_boards.csv");
				_writerRepositoryCSV = getWriter("repository.csv");
				_writerWikiCSV = getWriter("wiki.csv");

				Map<String, Object> context = getContext();

				processTemplate(_tplSample, context);

				_writerBlogsCSV.close();
				_writerCompanyCSV.close();
				_writerDocumentLibraryCSV.close();
				_writerDynamicDataListsCSV.close();
				_writerLayoutCSV.close();
				_writerMessageBoardsCSV.close();
				_writerRepositoryCSV.close();
				_writerWikiCSV.close();
			}

			protected Writer getWriter(String fileName) throws Exception {
				return createFileWriter(new File(_outputDir + "/" + fileName));
			}

		};

		thread.start();
	}

	protected Map<String, Object> getContext() {
		Map<String, Object> context = new HashMap<String, Object>();

		put(context, "counter", _dataFactory.getCounter());
		put(context, "dataFactory", _dataFactory);
		put(context, "maxAssetCategoryCount", _maxAssetCategoryCount);
		put(context, "maxBlogsEntryCommentCount", _maxBlogsEntryCommentCount);
		put(context, "maxBlogsEntryCount", _maxBlogsEntryCount);
		put(context, "maxDDLRecordCount", _maxDDLRecordCount);
		put(context, "maxDDLRecordSetCount", _maxDDLRecordSetCount);
		put(context, "maxDLFileEntryCount", _maxDLFileEntryCount);
		put(context, "maxDLFileEntrySize", _maxDLFileEntrySize);
		put(context, "maxDLFolderCount", _maxDLFolderCount);
		put(context, "maxDLFolderDepth", _maxDLFolderDepth);
		put(context, "maxGroupCount", _maxGroupCount);
		put(context, "maxJournalArticleCount", _maxJournalArticleCount);
		put(context, "maxJournalArticlePageCount", _maxJournalArticlePageCount);
		put(
			context, "maxJournalArticleVersionCount",
			_maxJournalArticleVersionCount);
		put(context, "maxMBCategoryCount", _maxMBCategoryCount);
		put(context, "maxMBMessageCount", _maxMBMessageCount);
		put(context, "maxMBThreadCount", _maxMBThreadCount);
		put(context, "maxUserCount", _maxUserCount);
		put(context, "maxUserToGroupCount", _maxUserToGroupCount);
		put(context, "maxWikiNodeCount", _maxWikiNodeCount);
		put(context, "maxWikiPageCommentCount", _maxWikiPageCommentCount);
		put(context, "maxWikiPageCount", _maxWikiPageCount);
		put(context, "writerBlogsCSV", _writerBlogsCSV);
		put(context, "writerCompanyCSV", _writerCompanyCSV);
		put(context, "writerDocumentLibraryCSV", _writerDocumentLibraryCSV);
		put(context, "writerDynamicDataListsCSV", _writerDynamicDataListsCSV);
		put(context, "writerLayoutCSV", _writerLayoutCSV);
		put(context, "writerMessageBoardsCSV", _writerMessageBoardsCSV);
		put(context, "writerRepositoryCSV", _writerRepositoryCSV);
		put(context, "writerWikiCSV", _writerWikiCSV);

		return context;
	}

	protected File getInsertSQLFile(String tableName) {
		return new File(_tempDir, tableName + ".sql");
	}

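	/**
	 * Flushes the remaining batched inserts for each table, appends the
	 * non-insert statements, and either concatenates everything into a single
	 * merged SQL file or leaves the per-table files under the output
	 * directory, depending on the merge setting.
	 */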
	protected void mergeSQL() throws IOException {
		File outputFile = new File(_outputDir + "/sample-" + _dbType + ".sql");

		FileOutputStream fileOutputStream = null;
		FileChannel fileChannel = null;

		if (_outputMerge) {
			fileOutputStream = new FileOutputStream(outputFile);
			fileChannel = fileOutputStream.getChannel();
		}

		Set<Map.Entry<String, StringBundler>> insertSQLs =
			_insertSQLs.entrySet();

		for (Map.Entry<String, StringBundler> entry : insertSQLs) {
			String tableName = entry.getKey();

			String sql = _db.buildSQL(entry.getValue().toString());

			writeToInsertSQLFile(tableName, sql);

			Writer insertSQLWriter = _insertSQLWriters.remove(tableName);

			insertSQLWriter.write(";\n");

			insertSQLWriter.close();

			if (_outputMerge) {
				File insertSQLFile = getInsertSQLFile(tableName);

				FileInputStream insertSQLFileInputStream = new FileInputStream(
					insertSQLFile);

				FileChannel insertSQLFileChannel =
					insertSQLFileInputStream.getChannel();

				insertSQLFileChannel.transferTo(
					0, insertSQLFileChannel.size(), fileChannel);

				insertSQLFileChannel.close();

				insertSQLFile.delete();
			}
		}

		Writer writer = null;

		if (_outputMerge) {
			writer = new OutputStreamWriter(fileOutputStream);
		}
		else {
			writer = new FileWriter(getInsertSQLFile("others"));
		}

		for (String sql : _otherSQLs) {
			sql = _db.buildSQL(sql);

			writer.write(sql);
			writer.write(StringPool.NEW_LINE);
		}

		writer.close();

		File outputFolder = new File(_outputDir, "output");

		if (!_outputMerge && !_tempDir.renameTo(outputFolder)) {

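			// renameTo can fail, for example when the temp and output
			// directories are on different file systems, so fall back to
			// copying the directory
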
			FileUtil.copyDirectory(_tempDir, outputFolder);
		}
	}

	protected void processTemplate(String name, Map<String, Object> context)
		throws Exception {

		FreeMarkerUtil.process(name, context, _writerSampleSQL);
	}

	protected void put(Map<String, Object> context, String key, Object value) {
		context.put(key, value);
	}

	protected void writeToInsertSQLFile(String tableName, String sql)
		throws IOException {

		Writer writer = _insertSQLWriters.get(tableName);

		if (writer == null) {
			File file = getInsertSQLFile(tableName);

			writer = createFileWriter(file);

			_insertSQLWriters.put(tableName, writer);
		}

		writer.write(sql);
	}

	private static final int _PIPE_BUFFER_SIZE = 16 * 1024 * 1024;

	private static final String _TPL_ROOT =
		"com/liferay/portal/tools/samplesqlbuilder/dependencies/";

	private static final int _WRITER_BUFFER_SIZE = 16 * 1024;

	private DataFactory _dataFactory;
	private DB _db;
	private String _dbType;
	private Map<String, StringBundler> _insertSQLs =
		new ConcurrentHashMap<String, StringBundler>();
	private Map<String, Writer> _insertSQLWriters =
		new ConcurrentHashMap<String, Writer>();
	private int _maxAssetCategoryCount;
	private int _maxBlogsEntryCommentCount;
	private int _maxBlogsEntryCount;
	private int _maxDDLRecordCount;
	private int _maxDDLRecordSetCount;
	private int _maxDLFileEntryCount;
	private int _maxDLFileEntrySize;
	private int _maxDLFolderCount;
	private int _maxDLFolderDepth;
	private int _maxGroupCount;
	private int _maxJournalArticleCount;
	private int _maxJournalArticlePageCount;
	private int _maxJournalArticleSize;
	private int _maxJournalArticleVersionCount;
	private int _maxMBCategoryCount;
	private int _maxMBMessageCount;
	private int _maxMBThreadCount;
	private int _maxUserCount;
	private int _maxUserToGroupCount;
	private int _maxWikiNodeCount;
	private int _maxWikiPageCommentCount;
	private int _maxWikiPageCount;
	private int _optimizeBufferSize;
	private List<String> _otherSQLs = new ArrayList<String>();
	private String _outputDir;
	private boolean _outputMerge;
	private File _tempDir;
	private String _tplSample = _TPL_ROOT + "sample.ftl";
	private Writer _writerBlogsCSV;
	private Writer _writerCompanyCSV;
	private Writer _writerDocumentLibraryCSV;
	private Writer _writerDynamicDataListsCSV;
	private Writer _writerLayoutCSV;
	private Writer _writerMessageBoardsCSV;
	private Writer _writerRepositoryCSV;
	private Writer _writerSampleSQL;
	private Writer _writerWikiCSV;

}