1    /***
2    * Copyright 2009 ATG DUST Project Licensed under the Apache License, Version
3    * 2.0 (the "License"); you may not use this file except in compliance with the
4    * License. You may obtain a copy of the License at
5    * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
6    * or agreed to in writing, software distributed under the License is
7    * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
8    * KIND, either express or implied. See the License for the specific language
9    * governing permissions and limitations under the License.
10   */
11  package atg.adapter.gsa;
12  
13  import java.io.File;
14  import java.io.PrintWriter;
15  import java.sql.SQLException;
16  import java.util.ArrayList;
17  import java.util.Collection;
18  import java.util.HashSet;
19  import java.util.Iterator;
20  import java.util.List;
21  import java.util.Properties;
22  import java.util.Set;
23  import java.util.Vector;
24  
25  import javax.transaction.TransactionManager;
26  
27  import org.apache.ddlutils.DatabaseOperationException;
28  
29  import atg.adapter.gsa.xml.TemplateParser;
30  import atg.dtm.TransactionDemarcation;
31  import atg.dtm.TransactionDemarcationException;
32  import atg.junit.nucleus.TestUtils;
33  import atg.naming.NameContext;
34  import atg.naming.NameContextBindingEvent;
35  import atg.nucleus.Configuration;
36  import atg.nucleus.GenericService;
37  import atg.nucleus.Nucleus;
38  import atg.nucleus.NucleusNameResolver;
39  import atg.nucleus.ServiceEvent;
40  import atg.nucleus.ServiceException;
41  import atg.nucleus.logging.LogListener;
42  import atg.repository.RepositoryException;
43  
44  /***
45   * This class is an extension of atg.adapter.gsa.GSARepository. Its purpose is
46   * to create tables and initial data required for starting a given repository.
47   * 
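   * <p>
   * As a rough, hypothetical illustration only, a Nucleus .properties file for
   * a component based on this class might look something like the following
   * (the definition and import file paths are made up, and the usual
   * GSARepository settings such as dataSource and transactionManager are
   * omitted):
   * 
   * <pre>
   * $class=atg.adapter.gsa.InitializingGSA
   * repositoryName=TestRepository
   * definitionFiles=/test/testRepository.xml
   * importFiles=/home/test/data/initial-data.xml
   * createTables=true
   * dropTablesIfExist=true
   * dropTablesAtShutdown=false
   * </pre>
   * 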
48   * @author mfrenzel
49   * @version 1.0
50   */
51  
52  public class InitializingGSA extends GSARepository {
53  
54    // -------------------------------------
55    /*** Class version string */
56  
57    public static String CLASS_VERSION = "$Id: //test/UnitTests/base/main/src/Java/atg/test/apiauto/util/InitializingGSA.java#11 $$Change: 550950 $";
58  
59    // -----------------------------------
60    // ---From Properties File------------
61  
62    private boolean mUseDDLUtils = true;
63  
64    /***
65     * If true then Apache DDLUtils will be used to generate the schema. Otherwise
66     * the GSA generated SQL will be used.
67     * 
68     * @return the useDDLUtils
69     */
70    public boolean isUseDDLUtils() {
71      return mUseDDLUtils;
72    }
73  
74    /***
75     * If true then Apache DDLUtils will be used to generate the schema. Otherwise
76     * the GSA generated SQL will be used.
77     * 
78     * @param pUseDDLUtils
79     *          the useDDLUtils to set
80     */
81    public void setUseDDLUtils(boolean pUseDDLUtils) {
82      mUseDDLUtils = pUseDDLUtils;
83    }
84  
85    // do we want to create tables if they don't exist
86    private boolean mCreateTables = true;
87  
88    public void setCreateTables(boolean pCreate) {
89      mCreateTables = pCreate;
90    }
91  
92    public boolean isCreateTables() {
93      return mCreateTables;
94    }
95  
96    // do we want to drop tables that exist if we want to create
97    // a table with the same name
98    private boolean mDropTables = false;
99  
100   public void setDropTablesIfExist(boolean pDrop) {
101     mDropTables = pDrop;
102   }
103 
104   public boolean isDropTablesIfExist() {
105     return mDropTables;
106   }
107 
108   // the XML files containing export data from the TemplateParser
109   // it will be imported into the database after tables are created
110   // we load the files as Files instead of XMLFiles because the
111   // TemplateParser requires a full file path to the import file
112   // instead of using the CONFIGPATH
113   private File[] mImportFiles = null;
114 
115   public void setImportFiles(File[] pFiles) {
116     mImportFiles = pFiles;
117   }
118 
119   public File[] getImportFiles() {
120     return mImportFiles;
121   }
122 
123   public String[] getImportFilesAsStrings() {
124     File[] f = getImportFiles();
125     if (f == null)
126       return null;
127 
128     List<String> v = new ArrayList<String>();
129     for (int i = 0; i < f.length; i++) {
130       if (!v.contains(f[i].getAbsolutePath()))
131         v.add(f[i].getAbsolutePath());
132     }
133 
134     return (String[]) v.toArray(new String[v.size()]);
135   }
136 
137   // do we want to strip the 'references(..)' statements from SQL
138   // created by the GSA
139   private boolean mStripReferences = true;
140 
141   public void setStripReferences(boolean pStrip) {
142     mStripReferences = pStrip;
143   }
144 
145   public boolean isStripReferences() {
146     return mStripReferences;
147   }
148 
149   // do we want to show the create table statements that are executed
150   private boolean mShowCreate = false;
151 
152   public void setLoggingCreateTables(boolean pLog) {
153     mShowCreate = pLog;
154   }
155 
156   public boolean isLoggingCreateTables() {
157     return mShowCreate;
158   }
159 
160   /*
161    * the SQLProcessorEngine to use for creating tables. this property is optional
162    * because we'll create a default SQLProcessorEngine if the property isn't set
163    */
164   private SQLProcessorEngine mSQLProcessor = null;
165 
166   public void setSQLProcessor(SQLProcessorEngine pEngine) {
167     mSQLProcessor = pEngine;
168   }
169 
170   public SQLProcessorEngine getSQLProcessor() {
171     // create a new processor if one isn't set
172     if (mSQLProcessor == null) {
173       mSQLProcessor = new SQLProcessorEngine(this);
174       mSQLProcessor.setLoggingDebug(this.isLoggingDebug());
175       mSQLProcessor.setLoggingError(this.isLoggingError());
176       mSQLProcessor.setLoggingInfo(this.isLoggingInfo());
177       mSQLProcessor.setLoggingWarning(this.isLoggingWarning());
178       LogListener[] listeners = this.getLogListeners();
179       for (int i = 0; i < listeners.length; i++) {
180         mSQLProcessor.addLogListener(listeners[i]);
181       }
182     }
183 
184     return mSQLProcessor;
185   }
186 
187   /***
188    * boolean indicating whether we should perform the import every time Dynamo
189    * starts, or only perform the import if we created at least one table. NOTE:
190    * if dropTablesIfExist is true, the import will occur every time because we
191    * will create tables every time. default: false - only perform the import
192    * when tables are created
193    */
194   private boolean mImportEveryStartup = false;
195 
196   public void setImportEveryStartup(boolean pImport) {
197     mImportEveryStartup = pImport;
198   }
199 
200   public boolean isImportEveryStartup() {
201     return mImportEveryStartup;
202   }
203 
204   /***
205    * boolean indicating whether we should drop all tables associated with this
206    * repository when Dynamo is shut down. NOTE: this will only work properly if
207    * Dynamo is shut down properly. It will not work if Dynamo is just killed.
208    * default: false
209    */
210   private boolean mDropTablesAtShutdown = false;
211 
212   public void setDropTablesAtShutdown(boolean pDrop) {
213     mDropTablesAtShutdown = pDrop;
214   }
215 
216   public boolean isDropTablesAtShutdown() {
217     return mDropTablesAtShutdown;
218   }
219 
220   /***
221    * boolean indicating whether to wrap each imported file in its own
222    * transaction. this is a new option in D5.5 that has changed the method
223    * signature of atg.adapter.gsa.xml.TemplateParser.importFiles(). default: true
224    */
225   private boolean mImportWithTransaction = true;
226 
227   public void setImportWithTransaction(boolean pTran) {
228     mImportWithTransaction = pTran;
229   }
230 
231   public boolean isImportWithTransaction() {
232     return mImportWithTransaction;
233   }
234 
235   private Properties mSqlCreateFiles = new Properties();
236 
237   /***
238    * Optional mapping of user-specified sql files that should be executed
239    * instead of the SQL generated by startSQLRepository. Key values must be one
240    * of (case sensitive): <b>default </b>, <b>oracle </b>, <b>solid </b>,
241    * <b>informix </b>, <b>microsoft </b>, <b>sybase </b>, or <b>db2 </b>. Mapped
242    * values should be a colon (:) separated ordered list of files to execute for
243    * that database type. <br>
244    * Specified files may use
245    * 
246    * <pre>
247    * {....}
248    * </pre>
249    * 
250    * notation to indicate a System variable that should be substituted at
251    * runtime, such as
252    * 
253    * <pre>
254    * { atg.dynamo.root }
255    * </pre>
256    * 
257    * .
258    * <p>
259    * The following behavior is observed:
260    * 
261    * <pre>
262    * 
263    *           a) database meta data is used to determine specific database type
264    *           b) when &lt;b&gt;default&lt;/b&gt; not specified
265    *             - if mapping exists for specific db type, those files are executed
266    *             - otherwise, output from startSQLRepository is executed
267    *           c) when &lt;b&gt;default&lt;/b&gt; is specified
268    *             - if mapping exists for specific db type, those files are executed
269    *             - otherwise, files mapped under default are executed
270    *           d) if a mapping exists for a db type in 'sqlCreateFiles' then a corresponding
271    *              'sqlDropFiles' entry (for the specific db type, or for default) must exist.
272    *              Otherwise an exception is thrown at startup.
273    *   
274    * </pre>
275    * <p>
276    * Also, when a file specified in the property 'sqlCreateFiles' is used (i.e.
277    * output from startSQLRepository is not being used) then the initializingGSA
278    * will always do the following at startup, unless property
279    * 'executeCreateAndDropScripts' is set to false:
280    * 
281    * <pre>
282    * 
283    *           a) execute the appropriate dropSqlFile(s)
284    *           b) execute the appropriate createSqlFile(s)
285    *   
286    * </pre>
287    * 
288    * If 'executeCreateAndDropScripts' is false then in the case where scripts
289    * normally would be run they will instead be skipped and no SQL (from scripts
290    * or startSQLRepository) will be executed. The reason for this restriction is
291    * that it's too difficult to know whether a database has been properly reset
292    * for the 'createSqlFile(s)' to run properly, so we err on the conservative
293    * side and always reset it.
294    */
295   public void setSqlCreateFiles(Properties pFiles) {
296     mSqlCreateFiles = pFiles;
297   }
298 
299   /***
300    * returns optional mapping of user-specified sql files that should be
301    * executed instead of the SQL generated by startSQLRepository. see
302    * 'setSqlCreateFiles' for detailed explanation of this property.
303    */
304   public Properties getSqlCreateFiles() {
305     return mSqlCreateFiles;
306   }
307 
308   private Properties mSqlDropFiles = new Properties();
309 
310   /***
311    * sets optional mapping of user-specified sql files that should be
312    * executed during 'tear-down' instead of basing it on the SQL generated by
313    * startSQLRepository. see 'setSqlCreateFiles' for detailed explanation of
314    * this property.
315    */
316   public void setSqlDropFiles(Properties pFiles) {
317     mSqlDropFiles = pFiles;
318   }
319 
320   /***
321    * returns optional mapping of user-specified sql files that should be
322    * executed during 'tear-down' instead of basing it on the SQL generated by
323    * startSQLRepository. see 'setSqlCreateFiles' for detailed explanation of
324    * this property.
325    */
326   public Properties getSqlDropFiles() {
327     return mSqlDropFiles;
328   }
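  // A hypothetical example of how these two mappings might appear in a Nucleus
  // .properties file (paths and file names are illustrative only; multiple
  // files for one database type are listed as a single colon (:) separated
  // value, as described in setSqlCreateFiles):
  //
  //   sqlCreateFiles=oracle={atg.dynamo.root}/sql/create_oracle.sql,\
  //                  default={atg.dynamo.root}/sql/create_default.sql
  //   sqlDropFiles=oracle={atg.dynamo.root}/sql/drop_oracle.sql,\
  //                default={atg.dynamo.root}/sql/drop_default.sql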
329   
330   public boolean mAllowNoDrop = true;
331 
332   /***
333    * If true, one may specify create scripts, but no drop scripts. Otherwise it
334    * is an error to specify a create script but no drop script.
335    * 
336    * @return true if create scripts may be specified without corresponding drop scripts
337    */
338   public boolean isAllowNoDrop() {
339     return mAllowNoDrop;
340   }
341   
342   public void setAllowNotDrop(boolean pDrop) {
343     mAllowNoDrop = pDrop;
344   }
345 
346   private boolean mExecuteCreateDropScripts = true;
347 
348   /***
349    * if set to true then create and drop scripts mapped through properties
350    * 'sqlCreateFiles' and 'sqlDropFiles' will be executed. otherwise the
351    * scripts will not be executed at startup.
352    */
353   public void setExecuteCreateAndDropScripts(boolean pExec) {
354     mExecuteCreateDropScripts = pExec;
355   }
356 
357   /***
358    * returns true if create and drop scripts mapped through properties
359    * 'sqlCreateFiles' and 'sqlDropFiles' should be executed at startup.
360    */
361   public boolean isExecuteCreateAndDropScripts() {
362     return mExecuteCreateDropScripts;
363   }
364 
365   private boolean mLoadColumnInfosAtInitialStartup = false;
366 
367   /***
368    * returns true if the GSA should load JDBC metadata when starting the initial
369    * instantiation of the component. default: false
370    */
371   public boolean isLoadColumnInfosAtInitialStartup() {
372     return mLoadColumnInfosAtInitialStartup;
373   }
374 
375   /***
376    * set to true if the GSA should load JDBC metadata when starting the initial
377    * instantiation of the component. the default is false because the initial
378    * instantiation is only used to create tables and loading the metadata before
379    * the tables are created is unnecessary overhead which slows the startup
380    * process. When the component is restarted after the tables are created it
381    * uses the value of 'loadColumnInfosAtStartup' to determine whether to load
382    * the metadata on the restart.
383    */
384   public void setLoadColumnInfosAtInitialStartup(boolean pLoad) {
385     mLoadColumnInfosAtInitialStartup = pLoad;
386   }
387 
388   // -------------------------------------------------------------------------
389   // Member properties
390 
391   // this property is a little tricky and a bit of a hack, but it
392   // allows us to create the tables, etc on startup. When the component
393   // is initially started this will be false, but when it calls restart,
394   // we set it to true for the new instantiation to avoid infinitely
395   // recursing into new repositories
396   private boolean mTemporaryInstantiation = false;
397 
398   public void setIsTemporaryInstantiation(boolean pTemp) {
399     mTemporaryInstantiation = pTemp;
400   }
401 
402   private boolean isTemporaryInstantiation() {
403     return mTemporaryInstantiation;
404   }
405   
406   public boolean mRestartAfterTableCreation = true;
407 
408   private GSARepositorySchemaGenerator mGenerator;
409 
410   /***
411    * Returns true if this repository will attempt to "restart" after creating
412    * tables.
413    * 
414    * @return true if this repository will restart after creating tables
415    */
416   public boolean isRestartingAfterTableCreation() {
417     return mRestartAfterTableCreation;
418   }
419 
420   /***
421    * Sets if this repository will attempt to "restart" after creating tables. A
422    * value of true means that it should restart.
423    */
424   public void setRestartingAfterTableCreation(boolean pRestart) {
425     mRestartAfterTableCreation = pRestart;
426   }
427 
428   // -------------------------------------------------------------------------
429   // Methods
430 
431   /***
432    * Overrides doStartService from GSARepository to make the repository
433    * optionally create required tables and load data using the TemplateParser
434    * -import flag.
435    * 
438    */
439   public void doStartService() {
440     // Loading Column Infos in a separate thread
441     // can deadlock the Initializing GSA
442     if (isLoggingInfo())
443       logInfo("Setting loadColumnInfosInSeparateThread to false.");
444     setLoadColumnInfosInSeparateThread(false);
445     // if this is the temporary instantiation, we just want to
446     // call super.doStartService() and return
447     if (isTemporaryInstantiation()) {
448       if (isLoggingInfo())
449         logInfo("Restarting the GSA component to successfully load XML templates...");
450       super.doStartService();
451       return;
452     }
453 
454     try {
455       // otherwise, this is the 'real' instantiation and we want to
456       // do more....
457 
458       if (isLoggingInfo())
459         logInfo("\nInitializing the primary GSA component and checking tables...");
460       if (isLoggingDebug() && this.getDebugLevel() <= 6)
461         logDebug("For additional debugging statements, set debugLevel > 6");
462 
463       // make sure mappings for user specified SQL files are ok
464       validateUserSpecifiedSqlFiles();
465 
466       // we set logError and checkTables to false because tables
467       // probably won't exist and it'll just throw a bunch of
468       // unnecessary errors. also, we don't want to see errors because
469       // add-items, delete-items, etc. will fail
470       boolean logErrors = isLoggingError();
471       boolean checkTables = isCheckTables();
472       boolean logWarnings = isLoggingWarning();
473       setCheckTables(false);
474       setLoggingError(false);
475 
476       // also set 'loadColumnInfosAtStartup' to false to prevent attempts at
477       // loading lots of unwanted metadata. that's very time consuming and only
478       // needed by the final instantiation. The setLoadColumnInfosAtStartup method
479       // is new so use a try / catch in case we're dealing with an old version of
480       // GSARepository
483       boolean loadColumnInfosAtStartup = true;
484       try {
485         loadColumnInfosAtStartup = isLoadColumnInfosAtStartup();
486         setLoadColumnInfosAtStartup(isLoadColumnInfosAtInitialStartup());
487         if (isLoadColumnInfosAtInitialStartup()) {
488           if (isLoggingInfo())
489             logInfo("Enabled loading of column info for initial startup");
490         } else {
491           if (isLoggingInfo())
492             logInfo("Disabled loading of column info for initial startup");
493         }
494       } catch (Throwable t) {
495         if (isLoggingDebug())
496           logDebug("Could not modify loading of column metadata for preliminary startup.");
497       }
498 
499       // call GSA.doStartService to load XML definition files
500       super.doStartService();
501 
502       // reset 'LoadColumnInfosAtStartup' to whatever it was originally
503       try {
504         setLoadColumnInfosAtStartup(loadColumnInfosAtStartup);
505       } catch (Throwable t) {
506         logError(t);
507       }
508 
509       // reset check tables and loggingError
510       setCheckTables(checkTables);
511       setLoggingError(logErrors);
512       setLoggingWarning(logWarnings);
513 
514       // now create the tables and restart the repository
515       boolean createdTables = createTables();
516       if (isRestartingAfterTableCreation())
517         restart();
518 
519       // it's a little hidden, but when we just called restart(),
520       // we actually started up a temporary instantiation of this GSA
521       // which should have successfully loaded its XML definition file
522       // because the tables were already created. This component
523       // (the primary one) then copied all the properties from the
524       // temporary instantiation so it's just like this component loaded
525       // the XML definition file successfully :)
526 
527       // we're now ready to import specified XML files
528       if (isImportEveryStartup() || createdTables)
529         importFiles();
530       else {
531         if (isLoggingInfo())
532           logInfo("Import not performed because importEveryStartup is false and no tables were created.");
533       }
534 
535       if (isLoggingInfo())
536         logInfo("Component finished starting up.");
537 
538     } catch (Exception e) {
539       logError("Caught an unexpected exception trying to start component...", e);
540     }
541   }
542 
543   // -----------------------------------------
544   /***
545    * Restarts the repository. This involves re-reading nucleus properties,
546    * reloading definition files, and invalidating all cache entries. This method
547    * is a convenience for development purposes (to avoid restarting dynamo when
548    * a template has changed), and should not be used on a live site. This method
549    * is modified slightly from the restart method of GSARepository because it
550    * sets mTemporaryInstantiation to true so that the doStartService method of
551    * the new instance does not reload import files or try to recreate tables
552    */
553   public boolean restart() throws ServiceException {
554     Configuration c = getServiceConfiguration();
555     NucleusNameResolver r = new NucleusNameResolver(getNucleus(), getNucleus(),
556         getNameContext(), true);
557     InitializingGSA newRepository = (InitializingGSA) c.createNewInstance(this);
558     c.configureService(newRepository, r, this);
559 
560     // Fool this new repository into thinking that it has been
561     // bound to the same name context as the original repository
562     // This change will make sure that getAbsoluteName() returns
563     // a correct value.
564     NameContext nc = ((GenericService) this).getNameContext();
565     NameContextBindingEvent bindingEvent = new NameContextBindingEvent(this
566         .getName(), newRepository, this.getNameContext());
567     newRepository.nameContextElementBound(bindingEvent);
568 
569     ServiceEvent ev = new ServiceEvent(this, newRepository, getNucleus(), c);
570     /*
571      * We are purposefully not putting the new repository into the parent's name
572      * context. The existing repository is always the valid one. We're starting
573      * this new guy, then we're going to synchronize on the repository and get
574      * all of its info into us.
575      */
576 
577     // we have to set the new repository as temporary so it won't call
578     // restart and start an infinite recursion
579     newRepository.setIsTemporaryInstantiation(true);
580 
581     newRepository.startService(ev);
582     if (newRepository.isRunning()) {
583       synchronized (this) {
584         invalidateCaches();
585         copyFromOtherRepository(newRepository);
586       }
587       return true;
588     } else
589       return false;
590   }
591 
592   /***
593    * This method is called when the repository is shutdown. If
594    * dropTablesAtShutdown is true, it will attempt to drop all the tables.
595    * IMPORTANT: There is an optional property that can be set to indicate that
596    * all tables should be dropped at shutdown (dropTablesAtShutdown). Because of
597    * the order in which Nucleus shuts down the components, this may or may not
598    * work. It just depends on whether the datasource is shutdown before the
599    * repository. If you want to guarantee that your tables are dropped, manually
600    * invoke the doStopService method from the HTML admin pages.
601    */
602   public void doStopService() {
603     try {
604       // clear out state in SchemaTracker
605       SchemaTracker.getSchemaTracker().reset();
606       if (isDropTablesAtShutdown()) {
607         if (isLoggingInfo())
608           logInfo("Dropping tables because 'dropTablesAtShutdown' is true....");
609         dropTables();
610       }
611     } catch (Exception e) {
612       if (isLoggingError())
613         logError(e);
614     } finally {
615       super.doStopService();
616     }
617   }
618 
619   /***
620    * This method drops all tables required by the GSARepository.
621    * 
622    * @exception RepositoryException
623    *              if an error occurs while retrieving a list of the tables
624    *              associated with the repository
625    * @exception SQLProcessorException
626    *              if an error occurred trying to drop the tables
627    */
628   public void dropTables() throws RepositoryException, SQLProcessorException {
629     // execute SQL files, if specified
630     String[] dropFiles = getSpecifiedDropFiles();
631     if (dropFiles != null) {
632       if (isExecuteCreateAndDropScripts())
633         executeSqlFiles(dropFiles, false);
634       else if (isLoggingInfo())
635         logInfo("Skipping execution of drop scripts because property 'executeCreateAndDropScripts' is false.");
636       return;
637     }
638 
639     // otherwise, just drop tables based on startSQLRepository SQL
640 
641     if (isUseDDLUtils()) {
642       if (!Nucleus.getGlobalNucleus().isStopping()) {
643         // build a new one
644         mGenerator = new GSARepositorySchemaGenerator(this);
645       }
646 
647       try {
648         if (mGenerator != null)
649           mGenerator.dropSchema(true);
650       } catch (DatabaseOperationException e) {
651         throw new RepositoryException(e);
652       } catch (SQLException e) {
653         throw new RepositoryException(e);
654       }
655     } else {
656       Vector statements = getCreateStatements(null, null);
657       SQLProcessorEngine processor = getSQLProcessor();
658       processor.dropTablesFromCreateStatements(statements);
660     }
661   }
662 
663   /***
664    * This method creates the tables required by the GSARepository. If desired,
665    * check to make sure all the tables exist in the database. If a table doesn't
666    * exist, create it; if it does exist, don't do anything to it unless user
667    * wants to drop existing tables
668    * 
669    * @return boolean - true if tables were created
670    * @exception RepositoryException
671    *              if an error occurs while retrieving a list of the tables to
672    *              create
673    * @exception SQLProcessorException
674    *              if an error occurred trying to create the tables
675    */
676   private boolean createTables() throws RepositoryException,
677       SQLProcessorException {
678     // execute SQL files, if specified
679     String[] createFiles = getSpecifiedCreateFiles();
680     if (createFiles != null) {
681       if (!isExecuteCreateAndDropScripts()) {
682         if (isLoggingInfo())
683           logInfo("Skipping execution of SQL scripts b/c property 'executeCreateAndDropScripts' is false.");
684         return false;
685       }
686       // before executing the createFiles we always execute the drop files
687       dropTables();
688       executeSqlFiles(createFiles, true);
689       return true;
690     }
691 
692     // otherwise, just execute sql from startSQLRepository
693     boolean createdTables = false;
694 
695     if (isUseDDLUtils()) {
696       if (isCreateTables()) {
697         mGenerator = new GSARepositorySchemaGenerator(this);
698         try {
699           mGenerator.createSchema(true, isDropTablesIfExist());
700           createdTables = true;
701         } catch (DatabaseOperationException e) {
702           throw new RepositoryException(e);
703         } catch (SQLException e) {
704           throw new RepositoryException(e);
705         }
706       }
707     } else {
708       // Use GSA Generated SQL
709       SQLProcessorEngine spe = getSQLProcessor();
710       // turn on debug for the SQLProcessorEngine; the original guard
711       // "if (isLoggingDebug())" has been commented out, so this is always enabled
712       spe.setLoggingDebug(true);
713       Vector createStatements = getCreateStatements(null, null);
714       createdTables = spe.createTables(createStatements, isDropTablesIfExist());
715 
716     }
717 
718     return createdTables;
719   }
720 
721   /***
722    * This method imports files using the TemplateParser
723    * 
724    * @exception RepositoryException
725    *              if an error occurred while importing one of the xml files.
726    */
727   private void importFiles() throws RepositoryException {
728     if (isLoggingInfo())
729       logInfo("Importing files...");
730 
731     String[] loadFiles = getImportFilesAsStrings();
732     // just exit if no files were specified
733     if (loadFiles == null) {
734       if (isLoggingInfo())
735         logInfo("No files specified for import.");
736       return;
737     }
738 
739     if (isLoggingDebug()) {
740       logDebug("The following files will be imported:");
741       for (int i = 0; i < loadFiles.length; i++) {
742         logDebug("file: " + loadFiles[i]);
743       }
744     }
745 
746     // now load the import files if they were specified
747     PrintWriter ps = new PrintWriter(System.out);
748     if (loadFiles != null && loadFiles.length > 0) {
749       try {
750         TemplateParser.importFiles(this, loadFiles, ps,
751             isImportWithTransaction());
752       } catch (Exception e) {
753         throw new RepositoryException(
754             "Exception caught importing files into repository.", e);
755       }
756     }
757   }
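  // For reference, the files handed to TemplateParser.importFiles() above are
  // standard startSQLRepository data files. A minimal, hypothetical example of
  // such a file (item descriptor and property names are made up):
  //
  //   <gsa-template>
  //     <add-item item-descriptor="user" id="user1">
  //       <set-property name="firstName" value="Joe"/>
  //     </add-item>
  //   </gsa-template>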
758 
759   /***
760    * This method is used to remove the 'references...' parts from sql generated
761    * by the GSA. Removing the references avoids problems with creating tables in
762    * the wrong order and also makes it easy to drop / recreate tables. For example,
763    * the "references foo(id)" portion of a column definition is removed.
764    */
765   private String stripReferences(String pStr) {
766     if (isLoggingDebug()) {
767       logDebug("Removing references from SQL string...");
768       if (this.getDebugLevel() > 6)
769         logDebug("SQL string before references are removed: \n" + pStr);
770     }
771 
772     pStr = stripForeignKey(pStr);
773 
774     // must be of the following format
775     // fieldname data-type null references foo(id),
776     String ref = "references ";
777     String endRef = ",";
778 
779     StringBuffer sb = new StringBuffer();
780     int start = 0;
781     int end = 0;
782     end = pStr.indexOf(ref);
783 
784     // if unable to find "references ", try "REFERENCES " instead
785     if (end == -1) {
786       ref = ref.toUpperCase();
787       end = pStr.indexOf(ref);
788     }
789 
790     while (end != -1) {
791       String temp = pStr.substring(start, end);
792       sb.append(temp);
793       pStr = pStr.substring(end + ref.length());
794       start = pStr.indexOf(endRef);
795       end = pStr.indexOf(ref);
796     }
797     String temp2 = pStr.substring(start);
798     sb.append(temp2);
799 
800     if (isLoggingDebug())
801       logDebug("Final sql string -> references removed: \n" + sb.toString());
802 
803     return sb.toString();
804   }
805 
806   private String stripForeignKey(String pStr) {
807     if (isLoggingDebug()) {
808       logDebug("Removing Foreign Key from SQL string...");
809       if (this.getDebugLevel() > 6)
810         logDebug("SQL string before Foreign Key are removed: \n" + pStr);
811     }
812 
813     String key = "foreign key";
814     int flag = 0;
815     int end = 0;
816     end = pStr.toLowerCase().lastIndexOf(key);
817 
818     while (end != -1) {
819       flag = 1;
820       pStr = pStr.substring(0, end);
821       end = pStr.toLowerCase().lastIndexOf(key);
822     }
823     end = pStr.lastIndexOf(",");
824     if (flag == 0)
825       return pStr;
826     else
827       return pStr.substring(0, end) + " )";
828   }
829 
830   /***
831    * This method is used to retrieve all of the CREATE TABLE statements that are
832    * needed to generate tables for this GSA
833    * 
834    * @exception RepositoryException
835    *              if an error occurs with the Repository
836    */
837   private Vector getCreateStatements(PrintWriter pOut, String pDatabaseName)
838       throws RepositoryException {
839     Vector tableStatements = new Vector();
840     Vector indexStatements = new Vector();
841 
842     // use current database if none is supplied
843     if (pDatabaseName == null)
844       pDatabaseName = getDatabaseName();
845 
846     String[] descriptorNames = getItemDescriptorNames();
847     OutputSQLContext sqlContext = new OutputSQLContext(pOut);
848     GSAItemDescriptor itemDescriptors[];
849     DatabaseTableInfo dti = getDatabaseTableInfo(pDatabaseName);
850     int i, length = descriptorNames.length;
851 
852     itemDescriptors = new GSAItemDescriptor[length];
853     for (i = 0; i < length; i++) {
854       itemDescriptors[i] = (GSAItemDescriptor) getItemDescriptor(descriptorNames[i]);
855     }
856 
857     String create = null;
858     String index = null;
859     for (i = 0; i < length; i++) {
860       GSAItemDescriptor desc = itemDescriptors[i];
861       Table[] tables = desc.getTables();
862       if (tables != null) {
863         for (int j = 0; j < tables.length; j++) {
864           Table t = tables[j];
865           if (!t.isInherited()) {
866             sqlContext.clear();
867             create = t.generateSQL(sqlContext, pDatabaseName);
868             // get rid of any possible CREATE INDEX statements and store those
869             // in their own Vector of statements...
870             index = extractIndexStatement(create);
871             create = removeIndexStatements(create);
872             if (isStripReferences())
873               create = stripReferences(create);
874             if (index != null && !indexStatements.contains(index))
875               indexStatements.add(index);
876             if (create != null && !tableStatements.contains(create))
877               tableStatements.add(create);
878           }
879         }
880       }
881     }
882     /*
883      * if (pOut != null) { pOut.print(buffer); pOut.flush(); }
884      */
885 
886     return tableStatements;
887   }
888 
889   /***
890    * This method is used to extract a possible CREATE INDEX statement from a
891    * CREATE TABLE statement that is generated by a Table. If no CREATE INDEX
892    * statement is included, it returns null
893    */
894   private String extractIndexStatement(String pStatement) {
895     String search = "CREATE INDEX ";
896     String copy = pStatement.toUpperCase();
897     int i = copy.indexOf(search);
898     if (i != -1)
899       return stripTrailingSemiColon(pStatement.substring(i));
900 
901     return null;
902   }
903 
904   /***
905    * This method is used to remove any possible CREATE INDEX statements from the
906    * end of a CREATE TABLE statement generated by a Table. It returns the CREATE
907    * TABLE statement with all CREATE INDEX statements removed.
908    */
909   private String removeIndexStatements(String pStatement) {
910     String search = "CREATE INDEX ";
911     String copy = pStatement.toUpperCase();
912     int i = copy.indexOf(search);
913     if (i != -1)
914       pStatement = pStatement.substring(0, i);
915 
916     return stripTrailingSemiColon(pStatement);
917   }
918 
919   /***
920    * This method is used to remove the trailing semicolon from a String. It is
921    * assumed that these strings will only possibly have one semicolon, and that
922    * if there is one everything after the semicolon is junk.
923    */
924   private String stripTrailingSemiColon(String pStr) {
925     if (pStr == null)
926       return pStr;
927     int idx = pStr.indexOf(";");
928     if (idx != -1)
929       pStr = pStr.substring(0, idx);
930 
931     return pStr;
932   }
933 
934   // ---------- methods to help with user-specified SQL files -----------
935   // allowable db types to specify
936   public String SOLID = "solid";
937   public String ORACLE = "oracle";
938   public String MICROSOFT = "microsoft";
939   public String INFORMIX = "informix";
940   public String DB2 = "db2";
941   public String SYBASE = "sybase";
942   public String SYBASE2 = "Adaptive Server Enterprise"; // sybase 12.5
943   public String DEFAULT = "default";
944   private String[] dbTypes = { SOLID, ORACLE, MICROSOFT, INFORMIX, DB2, SYBASE,
945       SYBASE2, DEFAULT };
946 
947   /***
948    * returns the dbtype for the database being used. returned value will be one
949    * of the constants SOLID, ORACLE, MICROSOFT, INFORMIX, DB2, or SYBASE, or
950    * DEFAULT if the db type can not be determined.
951    */
952   private String getDatabaseType() {
953     String type = getDatabaseName();
954     for (int i = 0; i < dbTypes.length; i++) {
955       if (type.toLowerCase().indexOf(dbTypes[i].toLowerCase()) > -1) {
956         if (dbTypes[i].equals(SYBASE2))
957           return SYBASE;
958         return dbTypes[i];
959       }
960     }
961     return DEFAULT;
962   }
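  // For example, a database name such as "Oracle" contains "oracle" and
  // therefore maps to ORACLE above, while an unrecognized database name falls
  // through to DEFAULT.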
963 
964   /***
965    * returns array of user-specified SQL files that should be executed, or null
966    * if output from startSQLRepository should be used.
967    * 
968    * @exception RepositoryException
969    *              if an error occurs getting the array of files to execute
970    */
971   private String[] getSpecifiedCreateFiles() throws RepositoryException {
972     // try to get mapped value for this specific db type, and if it's empty try
973     // the default
974     String files = (String) getSqlCreateFiles().get(getDatabaseType());
975     if (files == null)
976       files = (String) getSqlCreateFiles().get(DEFAULT);
977     // if it's still empty then just return b/c there's nothing to execute
978     if (files == null)
979       return null;
980 
981     // if file list is not null, convert it and return the array
982     try {
983       return TestUtils.convertFileArray(files, ":");
984     } catch (Exception e) {
985       throw new RepositoryException(e);
986     }
987   }
988 
989   /***
990    * returns array of user-specified SQL files that should be executed, or null
991    * if output from startSQLRepository should be used.
992    * 
993    * @exception RepositoryException
994    *              if an error occurs getting the array of files to execute
995    */
996   private String[] getSpecifiedDropFiles() throws RepositoryException {
997     // try to get mapped value for this specific db type, and if it's empty try
998     // the default
999     String files = (String) getSqlDropFiles().get(getDatabaseType());
1000     if (files == null)
1001       files = (String) getSqlDropFiles().get(DEFAULT);
1002     // if it's still empty then just return b/c there's nothing to execute
1003     if (files == null)
1004       return null;
1005 
1006     // if file list is not null, convert it and return the array
1007     try {
1008       return TestUtils.convertFileArray(files, ":");
1009     } catch (Exception e) {
1010       throw new RepositoryException(e);
1011     }
1012   }
1013 
1014   /***
1015    * verifies that SQL files specified by user are ok. in particular, that if
1016    * the user mapped an entry in 'sqlCreateFiles' for a db type there is a
1017    * corresponding 'sqlDropFiles' entry, and vice-versa.
1018    * 
1019    * @exception RepositoryException
1020    *              if anything is wrong
1021    */
1022   private void validateUserSpecifiedSqlFiles() throws RepositoryException {
1023     // don't let them be null
1024     if (getSqlCreateFiles() == null)
1025       setSqlCreateFiles(new Properties());
1026     if (getSqlDropFiles() == null)
1027       setSqlDropFiles(new Properties());
1028     // make sure all the keys are valid
1029     Set keys = new HashSet();
1030     keys.addAll(getSqlCreateFiles().keySet());
1031     keys.addAll(getSqlDropFiles().keySet());
1032     Set allow_keys = new HashSet();
1033     for (int i = 0; i < dbTypes.length; i++) {
1034       keys.remove(dbTypes[i]);
1035       if (!dbTypes[i].equals(SYBASE2))
1036         allow_keys.add(dbTypes[i]);
1037     }
1038     if (keys.size() > 0)
1039       throw new RepositoryException(
1040           "The following keys used in the 'sqlCreateFiles' and/or 'sqlDropFiles' properties "
1041               + "are invalid: " + keys + ".  Allowable keys are: " + allow_keys);
1042 
1043     boolean isDefaultCreate = (getSqlCreateFiles().get(DEFAULT) != null);
1044     boolean isDefaultDrop = (getSqlDropFiles().get(DEFAULT) != null);
1045     // if there are defaults it will always be ok, so just return
1046     if (isDefaultCreate && isDefaultDrop)
1047       return;
1048 
1049     // otherwise, check each dbType individually
1050     for (int i = 0; i < dbTypes.length; i++) {
1051       boolean isCreate = (getSqlCreateFiles().get(dbTypes[i]) != null);
1052       boolean isDrop = (getSqlDropFiles().get(dbTypes[i]) != null);
1053       if (!isAllowNoDrop()) {
1054         if (isCreate && !isDrop && !isDefaultDrop)
1055           throw new RepositoryException(
1056               "Mapping exists for database type "
1057               + dbTypes[i]
1058                         + " in property 'sqlCreateFiles', but not in property 'sqlDropFiles', and "
1059                         + "there is no default specified.");
1060         if (isDrop && !isCreate && !isDefaultCreate)
1061           throw new RepositoryException(
1062               "Mapping exists for database type "
1063               + dbTypes[i]
1064                         + " in property 'sqlDropFiles', but not in property 'sqlCreateFiles', and "
1065                         + "there is no default specified.");
1066       }
1067     }
1068   }
1069 
1070   /***
1071    * executes the specified SQL files against this Repository's DataSource.
1072    * 
1073    * @param pFiles the files to execute
1074    * @param pStopAtError
1075    *          true if execution should stop at the first error. if false, then
1076    *          a warning will be printed for encountered errors.
1077    * @exception RepositoryException
1078    *              if pStopAtError is true and an error occurs while executing
1079    *              one of the sql statements.
1080    */
1081   private void executeSqlFiles(String[] pFiles, boolean pStopAtError)
1082       throws RepositoryException {
1083     SQLProcessor sp = new SQLProcessor(getTransactionManager(), getDataSource());
1084     boolean success = false;
1085     TransactionDemarcation td = new TransactionDemarcation();
1086     try {
1087       td.begin((TransactionManager) Nucleus.getGlobalNucleus().resolveName(
1088           "/atg/dynamo/transaction/TransactionManager"));
1089 
1090       // for sql server auto-commit must be true
1091       // adamb: Hmm Marty added this, but it
1092       // breaks against MSSQL 8
1093       // if (getDatabaseType().equals(MICROSOFT))
1094       //  sp.setAutoCommit(true);
1095       SQLFileParser parser = new SQLFileParser();
1096       for (int i = 0; i < pFiles.length; i++) {
1097         String file = pFiles[i];
1098         // switch the file path so everything is forward slashes
1099         file = file.replace('\\', '/');
1100         String cmd = null;
1101         Iterator cmds = null;
1102         if (isLoggingInfo())
1103           logInfo("Executing SQL file: " + file);
1104         if (!new File(file).exists())
1105           throw new RepositoryException("SQL file " + file + " does not exist.");
1106         
1107         // parse the file to get commands...
1108         try {
1109           Collection c = parser.parseSQLFile(file);
1110           if (isLoggingDebug())
1111             logDebug("Parsed " + c.size() + " SQL command(s) from file.");
1112           cmds = c.iterator();
1113         } catch (Exception e) {
1114           // an error parsing the file indicates something very wrong, so bail
1115           throw new RepositoryException("Error encountered parsing SQL file "
1116               + file, e);
1117         }
1118         
1119         // then execute the commands...
1120         while (cmds.hasNext()) {
1121           cmd = (String) cmds.next();
1122           if (cmd.trim().length() == 0)
1123             continue;
1124           if (isLoggingDebug() || isLoggingCreateTables())
1125             logDebug("Executing SQL cmd [" + cmd + "]");
1126           try {
1127             sp.executeSQL(cmd);
1128           } catch (Exception e) {
1129             if (pStopAtError) {
1130               throw new RepositoryException(
1131                   "Error received executing command [" + cmd
1132                       + "] from SQL file " + file, e);
1133             } else {
1134               if (isLoggingWarning())
1135                 logWarning("Error received executing command [" + cmd
1136                     + "] from SQL file " + file + ": " + e.getMessage());
1137             }
1138           }
1139         }
1140       }
1141       success = true;
1142     } catch (TransactionDemarcationException e) {
1143       logError(e);
1144     } finally {
1145       try {
1146         td.end(!success);
1147       } catch (TransactionDemarcationException e) {
1148         logError(e);
1149       }
1150     }
1151   }
1152 
1153 } // end of class
1154