View Javadoc

1   /***
2    * Copyright 2007 ATG DUST Project
3    * 
4    * Licensed under the Apache License, Version 2.0 (the "License");
5    * you may not use this file except in compliance with the License.
6    * 
7    * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
8    * 
9    * Unless required by applicable law or agreed to in writing, software 
10   * distributed under the License is distributed on an "AS IS" BASIS,
11   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12   * See the License for the specific language governing permissions and limitations under the License.
13   */
14  
15  package atg.adapter.gsa;
16  
17  import java.io.File;
18  import java.io.PrintWriter;
19  import java.util.ArrayList;
20  import java.util.Collection;
21  import java.util.HashSet;
22  import java.util.Iterator;
23  import java.util.List;
24  import java.util.Properties;
25  
26  import org.apache.log4j.Logger;
27  
28  import atg.adapter.gsa.xml.TemplateParser;
29  import atg.adapter.gsa.xml.VersioningContextUtil;
30  import atg.adapter.version.VersionRepository;
31  import atg.junit.nucleus.TestUtils;
32  import atg.naming.NameContextBindingEvent;
33  import atg.nucleus.Configuration;
34  import atg.nucleus.GenericService;
35  import atg.nucleus.NucleusNameResolver;
36  import atg.nucleus.ServiceEvent;
37  import atg.nucleus.ServiceException;
38  import atg.nucleus.logging.LogListener;
39  import atg.repository.RepositoryException;
40  
41  /***
42   * This class is an extension of atg.adapter.version.VersionRepository.  
43   * Currently, it does not support to create or drop tables based on 
44   * startSQLRepository SQL. It only support to create or drop 
45   * tables through specified sql scripts. 
46   * 
47   *    For example:
48   * 
49   *    sqlCreateFiles=oracle={atg.dynamo.root}/SystemTests/bizuiTest/sql/install/oracle/testrepver_ddl.sql
50   *    sqlDropFiles=oracle={atg.dynamo.root}/SystemTests/bizuiTest/sql/install/oracle/drop_testrepver_ddl.sql
51   *
52   * Additionally, it uses the GSA's import facility to allow data to be loaded 
53   * into the tables after creation. But you have to give relevant arguments.
54   * There are two set of arguments you can choose. 
55   * 
56   * 1. specify projectName, user and comment/doCheckin(=false)
57   * 
58   *    For example:
59   * 
60   *    importFiles={atg.dynamo.root}/SystemTests/BizuiTest/data/test-repository-data.xml
61   *    projectName=newproject
62   *    user=publishing
63   *    comment=ImportDataWithProjectName
64   * 
65   * 2. specify workspaceIdPrefix, branchId, comment/doCheckin(=false)
66   * 
67   *    For example:
68   * 
69   *     importFiles={atg.dynamo.root}/SystemTests/BizuiTest/data/test-repository-data.xml
70   *     workspaceIdPrefix=TestRepImport
71   *     branchId=main
72   *     comment=hello
73   *     doCheckin=true
74   * 
75   * @author adwivedi
76   * @author qma
77   * 
78   *  Note: this class is modified from InitializingGSA.java. It has most properties of InitializingGSA.
79   *  You can see the detail info from the following link: 
80   *  http://atlas.atg.com/mfrenzel/apiauto/APIAutoTest.html#initializinggsa
81   *  
82   */
83  
84  public class InitializingVersionRepository extends VersionRepository {
85    
86    private static Logger log = Logger.getLogger(InitializingVersionRepository.class);
87    
88    // -----------------------------------
89    // ---From Properties File------------	
90    // do we want to create tables if they don't exist
91    private boolean mCreateTables = true;
92  
93    public void setCreateTables(boolean pCreate) {
94      mCreateTables = pCreate;
95    }
96  
97    public boolean isCreateTables() {
98      return mCreateTables;
99    }
100 
101   // do we want to drop tables that exist if we want to create
102   // a table with the same name
103   private boolean mDropTables = false;
104 
105   public void setDropTablesIfExist(boolean pDrop) {
106     mDropTables = pDrop;
107   }
108 
109   public boolean isDropTablesIfExist() {
110     return mDropTables;
111   }
112 
113   // the XML files containing export data from the TemplateParser
114   // it will be imported into the database after tables are created
115   // we load the files as Files instead of XMLFiles because the
116   // TemplateParser requires a full file path to the import file
117   // instead of using the CONFIGPATH
118   private File[] mImportFiles = null;
119 
120   public void setImportFiles(File[] pFiles) {
121     mImportFiles = pFiles;
122   }
123 
124   public File[] getImportFiles() {
125     return mImportFiles;
126   }
127 
128   public String[] getImportFilesAsStrings() {
129     File[] f = getImportFiles();
130     if (f == null)
131       return null;
132 
133     List<String> v = new ArrayList<String>();
134     for (int i = 0; i < f.length; i++) {
135       if (!v.contains(f[i].getAbsolutePath()))
136         v.add(f[i].getAbsolutePath());
137     }
138 
139     return (String[]) v.toArray(new String[v.size()]);
140   }
141 
142   // do we want to strip the 'references(..)' statements from SQL
143   // created by the GSA
144   private boolean mStripReferences = false;
145 
146   public void setStripReferences(boolean pStrip) {
147     mStripReferences = pStrip;
148   }
149 
150   public boolean isStripReferences() {
151     return mStripReferences;
152   }
153 
154   // do we want to show the create table statements that are executed
155   private boolean mShowCreate = false;
156 
157   public void setloggingCreateTables(boolean pLog) {
158     mShowCreate = pLog;
159   }
160 
161   public boolean isLoggingCreateTables() {
162     return mShowCreate;
163   }
164 
165   /* the SQLProcessorEngine to use for creating tables
166    * this property is optional because we'll create a default
167    * SQLProcessorEngine if the property isn't set
168    */
169   private SQLProcessorEngine mSQLProcessor = null;
170 
171   public void setSQLProcessor(SQLProcessorEngine pEngine) {
172     mSQLProcessor = pEngine;
173   }
174 
175   public SQLProcessorEngine getSQLProcessor() {
176     // create a new processor if one isn't set
177     if (mSQLProcessor == null) {
178       mSQLProcessor = new SQLProcessorEngine(this);
179       mSQLProcessor.setLoggingDebug(this.isLoggingDebug());
180       mSQLProcessor.setLoggingError(this.isLoggingError());
181       mSQLProcessor.setLoggingInfo(this.isLoggingInfo());
182       mSQLProcessor.setLoggingWarning(this.isLoggingWarning());
183       LogListener[] listeners = this.getLogListeners();
184       for (int i = 0; i < listeners.length; i++) {
185         mSQLProcessor.addLogListener(listeners[i]);
186       }
187     }
188 
189     return mSQLProcessor;
190   }
191 
192   /*** boolean indicating whether we should perform the import every time
193    * Dynamo starts, or only perform the import if we created at least
194    * one table.
195    * NOTE: if dropTablesIfExist is true, the import will occur every time
196    * because we will create tables every time.
197    * default: false - only perform the import when tables are created
198    */
199   private boolean mImportEveryStartup = false;
200 
201   public void setImportEveryStartup(boolean pImport) {
202     mImportEveryStartup = pImport;
203   }
204 
205   public boolean isImportEveryStartup() {
206     return mImportEveryStartup;
207   }
208 
209   /*** boolean indicating whether we should drop all tables associated with
210    *  this repository when Dynamo is shut down.
211    *  NOTE: this will only work properly is Dynamo is shutdown properly. It
212    *  will not work if Dynamo is just killed
213    *  default: false
214    */
215   private boolean mDropTablesAtShutdown = false;
216 
217   public void setDropTablesAtShutdown(boolean pDrop) {
218     mDropTablesAtShutdown = pDrop;
219   }
220 
221   public boolean isDropTablesAtShutdown() {
222     return mDropTablesAtShutdown;
223   }
224 
225   /*** boolean indicating whether to wrap each imported file in it's own transaction.
226    *  this is a new option in D5.5 that has changed the method signature of
227    *  atg.adapter.gsa.xml.TemplateParser.importFiles()
228    *  default: true
229    */
230   private boolean mImportWithTransaction = true;
231 
232   public void setImportWithTransaction(boolean pTran) {
233     mImportWithTransaction = pTran;
234   }
235 
236   public boolean isImportWithTransaction() {
237     return mImportWithTransaction;
238   }
239 
240   private Properties mSqlCreateFiles = new Properties();
241 
242   /*** Optional mapping of user-specified sql files that should be executed instead of
243    *  the SQL generated by startSQLRepository.  Key values must be one of (case sensitive):
244    *  <b>default</b>, <b>oracle</b>, <b>solid</b>, <b>informix</b>, <b>microsoft</b>,
245    *  <b>sybase</b>, or <b>db2</b>.  Mapped values should be a colon (:) separated
246    *  ordered list of files to execute for that database type.
247    *  <br>Specified files may use <pre>{....}</pre> notation to indicate a
248    *  System variable that should be substituted at runtime, such as <pre>{atg.dynamo.root}</pre>.
249    *  <p>The following behavior is observed:
250    *  <pre>
251    *          a) database meta data is used to determine specific database type
252    *          b) when <b>default</b> not specified
253    *            - if mapping exists for specific db type, those files are executed
254    *            - otherwise, output from startSQLRepository is executed
255    *          c) when <b>default</b> is specified
256    *            - if mapping exists for specific db type, those files are executed
257    *            - otherwise, files mapped under default are executed
258    *          d) if a mapping exists for a db type in 'sqlCreateFiles' then a corresponding
259    *             entry (to the specific db type, or to default) must exist.  Otherwise an exception
260    *             is thrown at starup.
261    *  </pre>
262    *  <p>Also, when a file specified in the property 'sqlCreateFiles' is used (i.e. output
263    *  from startSQLRepository is not being used) then the initializingGSA will always
264    *  do the following at startup, unless property 'executeCreateAndDropScripts' is set to false:
265    *  <pre>
266    *          a) execute the appropriate dropSqlFile(s)
267    *          b) execute the appropriate createSqlFile(s)
268    *  </pre>
269    *  If 'executeCreateAndDropScripts' is false then in the case where scripts normally would be run
270    *  they will instead be skipped and no SQL (from scripts or startSQLRepository) will be executed.
271    *  The reason for this restriction is that it's too difficult to know whether a database has
272    *  been properly reset for the 'createSqlFile(s)' to run properly, so we err on the conservative
273    *  side and always reset it.
274    */
275   public void setSqlCreateFiles(Properties pFiles) {
276     mSqlCreateFiles = pFiles;
277   }
278 
279   /*** returns optional mapping of user-specified sql files that should be executed instead of
280    *  the SQL generated by startSQLRepository.  see 'setSqlCreateFiles' for detailed
281    *  explanation of this property.
282    */
283   public Properties getSqlCreateFiles() {
284     return mSqlCreateFiles;
285   }
286 
287   private Properties mSqlDropFiles = new Properties();
288 
289   /*** returns optional mapping of user-specified sql files that should be executed during
290    *  'tear-down' instead of basing it on the SQL generated by startSQLRepository.  see
291    *  'setSqlCreateFiles' for detailed explanation of this property.
292    */
293   public void setSqlDropFiles(Properties pFiles) {
294     mSqlDropFiles = pFiles;
295   }
296 
297   /*** returns optional mapping of user-specified sql files that should be executed during
298    *  'tear-down' instead of basing it on the SQL generated by startSQLRepository.  see
299    *  'setSqlCreateFiles' for detailed explanation of this property.
300    */
301   public Properties getSqlDropFiles() {
302     return mSqlDropFiles;
303   }
304 
305   private boolean mExecuteCreateDropScripts = true;
306 
307   /*** if set to true then create and drop scripts mapped through properties 'setSqlCreateFiles'
308    *  and 'getSqlCreateFiles' will be executed.  otherwise the scripts will not be executed at
309    *  startup.
310    */
311   public void setExecuteCreateAndDropScripts(boolean pExec) {
312     mExecuteCreateDropScripts = pExec;
313   }
314 
315   /*** returns true if create and drop scripts mapped through properties 'setSqlCreateFiles'
316    *  and 'getSqlCreateFiles' should be executed at startup.
317    */
318   public boolean isExecuteCreateAndDropScripts() {
319     return mExecuteCreateDropScripts;
320   }
321 
322   private boolean mLoadColumnInfosAtInitialStartup = false;
323 
324   /*** returns true if the GSA should load JDBC metadata when starting the
325    * initial instantiation of the component.  default: false
326    */
327   public boolean isLoadColumnInfosAtInitialStartup() {
328     return mLoadColumnInfosAtInitialStartup;
329   }
330 
331   /*** set to true if the GSA should load JDBC metadata when starting the initial
332    * instantiation of the component.  the default is false b/c the initial instantiation
333    * is only used to create tables and loading the metadata before the tables are
334    * created is unnecessary overhead which slows the startup process.  When the
335    * component is restarted after the tables are created it uses the value of
336    * 'loadColumnInfosAtStartup' to determine whether to load the metadata on the restart. */
337   public void setLoadColumnInfosAtInitialStartup(boolean pLoad) {
338     mLoadColumnInfosAtInitialStartup = pLoad;
339   }
340 
341   // ------------------------------------------------------------------------
342   // properties for version repository import
343   private String mProjectName = null;
344 
345   public void setProjectName(String pProjectName) {
346     mProjectName = pProjectName;
347   }
348 
349   public String getProjectName() {
350     return mProjectName;
351   }
352 
353   private String mProjectType = "Standard";
354 
355   public void setProjectType(String pProjectType) {
356     mProjectType = pProjectType;
357   }
358 
359   public String getProjectType() {
360     return mProjectType;
361   }
362 
363   private String mUser = null;
364 
365   public void setUser(String pUser) {
366     mUser = pUser;
367   }
368 
369   public String getUser() {
370     return mUser;
371   }
372 
373   private String mWorkspaceIdPrefix = null;
374 
375   public void setWorkspaceIdPrefix(String pWorkspaceIdPrefix) {
376     mWorkspaceIdPrefix = pWorkspaceIdPrefix;
377   }
378 
379   public String getWorkspaceIdPrefix() {
380     return mWorkspaceIdPrefix;
381   }
382 
383   private String mBranchId = null;
384 
385   public void setBranchId(String pBranchId) {
386     mBranchId = pBranchId;
387   }
388 
389   public String getBranchId() {
390     return mBranchId;
391   }
392 
393   private String mComment = null;
394 
395   public void setComment(String pComment) {
396     mComment = pComment;
397   }
398 
399   public String getComment() {
400     return mComment;
401   }
402 
403   private boolean mDoCheckin = true;
404 
405   public void setDoCheckin(boolean pDoCheckin) {
406     mDoCheckin = pDoCheckin;
407   }
408 
409   public boolean getDoCheckin() {
410     return mDoCheckin;
411   }
412 
413   public boolean isDoCheckin() {
414     return mDoCheckin;
415   }
416 
417   public boolean mRestartAfterTableCreation = true;
418 
419   /***
420    * Returns true if this repository will attempt to
421    * "restart" after creating tables.
422    * @return
423    */
424   public boolean isRestartingAfterTableCreation() {
425     return mRestartAfterTableCreation;
426   }
427 
428   /***
429    * Sets if this repository will attempt to
430    * "restart" after creating tables.
431    * A value of true means that it should restart.
432    */
433   public void setRestartingAfterTableCreation(boolean pRestart) {
434     mRestartAfterTableCreation = pRestart;
435   }
436 
437   //-------------------------------------------------------------------------
438   // Member properties
439 
440   // this property is a little tricky and a bit of a hack, but it
441   // allows us to create the tables, etc on startup.  When the component
442   // is initially started this will be false, but when it calls restart,
443   // we set it to true for the new instantiation to avoid infinitely
444   // recursing into new repositories
445   private boolean mTemporaryInstantiation = false;
446 
447   public void setIsTemporaryInstantiation(boolean pTemp) {
448     mTemporaryInstantiation = pTemp;
449   }
450 
451   private boolean isTemporaryInstantiation() {
452     return mTemporaryInstantiation;
453   }
454 
455   //-------------------------------------------------------------------------
456   // Methods
457 
458   /*** Overrides doStartService from VersionRepository to make the
459    * repository optionally create required tables by specified sql scripts and load data
460    * using the TemplateParser -import flag.
461    *
462    * @exception RepositoryException (?)
463    *
464    */
465   public void doStartService() {
466 
467     // if this is the temporary instantiation, we just want to
468     // call super.doStartService() and return
469     if (isTemporaryInstantiation()) {
470       if (isLoggingInfo())
471         logInfo("Restarting the Versioned GSA component to successfully load XML templates...");
472       super.doStartService();
473       return;
474     }
475     // also set 'loadColumnInfosAtStartup' to false to prevent attempts at
476     // loading lots of unwanted metadata. that's very time consuming and only needed
477     // by the final instantiation. The setLoadColumnInfosAtStartup method is new so
478     // use a try/catch in case we're dealing with an old version of GSARepository
479     boolean loadColumnInfosAtStartup = true;
480     try {
481       loadColumnInfosAtStartup = isLoadColumnInfosAtStartup();
482       setLoadColumnInfosAtStartup(isLoadColumnInfosAtInitialStartup());
483       if (isLoadColumnInfosAtInitialStartup()) {
484         if (isLoggingInfo())
485           logInfo("Enabled loading of column info for initial startup");
486       } else {
487         if (isLoggingInfo())
488           logInfo("Disabled loading of column info for initial startup");
489       }
490     } catch (Throwable t) {
491       if (isLoggingDebug())
492         logDebug("Could not modify loading of column metadata for preliminary startup.");
493     }
494     setLoggingWarning(false);
495     setLoggingError(true);
496     // call GSA.doStartService to load XML definition files
497     super.doStartService();
498     setLoggingError(true);
499     setLoggingWarning(true);
500 
501     // reset 'LoadColumnInfosAtStartup' to whatever it was originally
502     try {
503       setLoadColumnInfosAtStartup(loadColumnInfosAtStartup);
504     } catch (Throwable t) {
505     }
506 
507     try {
508       // now create the tables and restart the repository
509       //       setLoggingDebug(true);
510       boolean createdTables = createTables();
511       //      boolean old = isDropTablesAtShutdown();
512       //      setDropTablesAtShutdown(false);
513       //      super.doStopService();
514       //      setDropTablesAtShutdown(old);
515       //      super.doStartService();
516       if (isRestartingAfterTableCreation()) {
517         restart();
518       }
519       // we're now ready to import specified XML files
520       if (isImportEveryStartup() || createdTables)
521         importFiles();
522       else {
523         if (isLoggingInfo())
524           logInfo("Import not performed because importEveryStartup is false and no tables were created.");
525       }
526 
527     } catch (Exception e) {
528       logError(
529           "Caught an unexpected exception trying to create tables or importFiles ...",
530           e);
531     }
532 
533   }
534 
535   //-----------------------------------------
536   /***
537    * Restarts the repository. This involves re-reading nucleus properties,
538    * reloading definition files, and invalidating all cache entries. This method
539    * is a convenience for development purposes (to avoid restarting dynamo when
540    * a template has changed), and should not be used on a live site.
541    * 
542    * This method is modified slightly from the restart method of GSARepository
543    * because it sets mTemporaryInstantiation to true so that the doStartService
544    * method of the new instance does not reload import files or try to recreate
545    * tables
546    */
547   public boolean restart() throws ServiceException {
548     Configuration c = getServiceConfiguration();
549     NucleusNameResolver r = new NucleusNameResolver(getNucleus(), getNucleus(),
550         getNameContext(), true);
551     InitializingVersionRepository newRepository = (InitializingVersionRepository) c.createNewInstance(this);
552     c.configureService(newRepository, r, this);
553 
554     // Fool this new repository into thinking that it has been
555     // bound to the same name context as the original repository
556     // This changes will make sure that getAbsoluteName() returns
557     // a correct value.
558     ((GenericService) this).getNameContext();
559     new NameContextBindingEvent(this.getName()+"_ver", getWrappedRepository(), this.getNameContext());
560 //    newRepository.nameContextElementBound(bindingEvent);
561 //    nc.removeElement(this.getName()+"_ver");
562 //    super.setWrappedRepository(null);
563 
564     ServiceEvent ev = new ServiceEvent(this, getWrappedRepository(), getNucleus(), c);
565     /*
566      * We are purposefully not putting the new repository into the parent's name
567      * context. The existing repository is always the valid one. We're starting
568      * this new guy, then we're going to synchronize on the repository and get
569      * all of its info into us.
570      */
571 
572     // we have to set the new repository as temporary so it won't call
573     // restart and start an infinite recursion
574     newRepository.setIsTemporaryInstantiation(true);
575 
576     getWrappedRepository().startService(ev);
577 //    bindingEvent = new NameContextBindingEvent(this
578 //        .getName()+"_ver", newRepository, this.getNameContext());
579 //    newRepository.getWrappedRepository().nameContextElementUnbound(bindingEvent);    
580     if (newRepository.isRunning()) {
581       synchronized (this) {
582         invalidateCaches();
583         copyFromOtherRepository(newRepository);
584       }
585       return true;
586     } else
587       return false;
588   }
589 
590   /*** This method is called when the repository is shutdown.  If dropTablesAtShutdown
591    *  is true, it will attempt to drop all the tables.
592    *  IMPORTANT: There is an optional property that can be set to indicate that all tables
593    *  should be dropped at shutdown (dropTablesAtShutdown).  Because of the order in which
594    *  Nucleus shuts down the components, this may or may not work.  It just depends on whether
595    *  the datasource is shutdown before the repository.  If you want to guarantee that
596    *  your tables are dropped, manually invoke the doStopService method from the HTML admin
597    *  pages.
598    */
599   public void doStopService() {
600     try {
601       if (isDropTablesAtShutdown()) {
602         if (isLoggingInfo())
603           logInfo("Dropping tables because 'dropTablesAtShutdown' is true....");
604         dropTables();
605       }
606     } catch (Exception e) {
607       if (isLoggingError())
608         logError(e);
609     } finally {
610       super.doStopService();
611     }
612   }
613 
614   /*** This method drops all tables required by the GSARepository.
615    *
616    * @exception RepositoryException if an error occurs while retrieving a
617    * list of the tables associated with the repository
618    * @exception SQLProcessorException if an error occured trying to
619    * drop the tables
620    */
621   public void dropTables() throws RepositoryException, SQLProcessorException {
622     // execute SQL files, if specified
623     String[] dropFiles = getSpecifiedDropFiles();
624     if (dropFiles != null) {
625       if (isExecuteCreateAndDropScripts())
626         executeSqlFiles(dropFiles, false);
627       else if (isLoggingInfo())
628         logInfo("Skipping execution of SQL scripts b/c property 'executeCreateAndDropScripts' is false.");
629       return;
630     }
631 
632     // otherwise, just drop tables based on startSQLRepository SQL  -- not implement yet.
633     if (isLoggingInfo())
634       logInfo("Can not drop tables based on startSQLRepositoryRepository SQL. Please specified DropFiles!");
635     return;
636   }
637 
638   /*** This method creates the tables required by the GSARepository.
639    * If desired, check to make sure all the tables exist in the
640    * database. If a table doesn't exist, create it;
641    * if it does exist, don't do anything to it unless user wants
642    * to drop existing tables
643    *
644    * @return boolean - true if tables were created
645    * @exception RepositoryException if an error occurs while retrieving a list of the tables
646    * to create
647    * @exception SQLProcessorException if an error occured trying to
648    * create the tables
649    */
650   private boolean createTables() throws RepositoryException,
651       SQLProcessorException {
652     // execute SQL files, if specified
653     String[] createFiles = getSpecifiedCreateFiles();
654     if (createFiles != null) {
655       if (!isExecuteCreateAndDropScripts()) {
656         if (isLoggingError())
657           logError("Skipping execution of SQL scripts b/c property 'executeCreateAndDropScripts' is false.");
658         return false;
659       }
660       // before executing the createFiles we always execute the drop files
661       String[] dropFiles = getSpecifiedDropFiles();
662       executeSqlFiles(dropFiles, false);
663       log.info(createFiles);
664       executeSqlFiles(createFiles, true);
665       return true;
666     }
667 
668     // otherwise, just execute sql from startSQLRepository
669     boolean createdTables = false;
670 
671     if (isCreateTables()) {
672       SQLProcessorEngine spe = getSQLProcessor();
673 
674       // turn on debug for SQLProcessorEngine if GSA has debug on
675       if (isLoggingDebug())
676         spe.setLoggingDebug(true);
677 
678       List<String> createStatements = getCreateStatements(null, null);
679       createdTables = spe.createTables(createStatements, isDropTablesIfExist());
680     }
681 
682     return createdTables;
683   }
684 
685   /***
686    * This method is used to retrieve all of the CREATE TABLE statements that are
687    * needed to generate tables for this GSA
688    * 
689    * @exception RepositoryException
690    *              if an error occurs with the Repository
691    */
692   private List<String> getCreateStatements(PrintWriter pOut, String pDatabaseName)
693       throws RepositoryException {
694     List<String> tableStatements = new ArrayList<String>();
695     List<String> indexStatements = new ArrayList<String>();
696 
697     // use current database if none is supplied
698     if (pDatabaseName == null)
699       pDatabaseName = getDatabaseName();
700 
701     String[] descriptorNames = getWrappedRepository().getItemDescriptorNames();
702     OutputSQLContext sqlContext = new OutputSQLContext(pOut);
703     GSAItemDescriptor itemDescriptors[];
704     //DatabaseTableInfo dti = getDatabaseTableInfo(pDatabaseName);
705     int i, length = descriptorNames.length;
706 
707     itemDescriptors = new GSAItemDescriptor[length];
708     for (i = 0; i < length; i++) {
709       itemDescriptors[i] = (GSAItemDescriptor) getWrappedRepository()
710           .getItemDescriptor(descriptorNames[i]);
711     }
712 
713     String create = null;
714     String index = null;
715     HashSet<String> tableNames = new HashSet<String>();
716     for (i = 0; i < length; i++) {
717       GSAItemDescriptor desc = itemDescriptors[i];
718       Table[] tables = desc.getTables();
719       if (tables != null) {
720         for (int j = 0; j < tables.length; j++) {
721           Table t = tables[j];
722 
723           if (!t.isInherited() && !tableNames.contains(t.getName())) {
724             sqlContext.clear();
725             create = t.generateSQL(sqlContext, pDatabaseName);
726             // get rid of any possible CREATE INDEX statements and store those
727             // in their own Vector of statements...
728             index = extractIndexStatement(create);
729             create = removeIndexStatements(create);
730             if (isStripReferences())
731               create = stripReferences(create);
732             if (index != null && !indexStatements.contains(index))
733               indexStatements.add(index);
734             if (create != null && !tableStatements.contains(create))
735               tableStatements.add(create);
736             tableNames.add(t.getName());
737           }
738         }
739       }
740     }
741     /*
742      * if (pOut != null) { pOut.print(buffer); pOut.flush(); }
743      */
744 
745     return tableStatements;
746   }
747 
748   /*** This method imports files using the TemplateParser
749    *
750    * @exception RepositoryException if an error occured while importing
751    * one of the xml files.
752    */
753   private void importFiles() throws RepositoryException {
754     if (isLoggingInfo())
755       logInfo("Importing files...");
756 
757     String[] loadFiles = getImportFilesAsStrings();
758     // just exit if no files were specified
759     if (loadFiles == null) {
760       if (isLoggingInfo())
761         logInfo("No files specified for import.");
762       return;
763     }
764 
765     if (isLoggingDebug()) {
766       logDebug("The following files will be imported:");
767       for (int i = 0; i < loadFiles.length; i++) {
768         logDebug("file: " + loadFiles[i]);
769       }
770     }
771 
772     // now load the import files if they were specified
773     PrintWriter ps = new PrintWriter(System.out);
774     if (loadFiles != null && loadFiles.length > 0) {
775       try {
776 
777         String pProjectName = getProjectName();
778         String pProjectType = getProjectType();
779         String pUser = getUser();
780         String pWorkspaceId = getWorkspaceIdPrefix();
781         String pBranchId = getBranchId();
782         String pComment = getComment();
783         boolean pDoCheckin = isDoCheckin();
784 
785         // check the versioning flags for correctness
786         if (isLoggingDebug())
787           logDebug("checking the versioning flags for correctness ... ");
788         if (isLoggingDebug())
789           logDebug("pProjectName = " + pProjectName);
790         if (isLoggingDebug())
791           logDebug("pProjectType = " + pProjectType);
792         if (isLoggingDebug())
793           logDebug("pUser = " + pUser);
794         if (isLoggingDebug())
795           logDebug("pWrokspaceId = " + pWorkspaceId);
796         if (isLoggingDebug())
797           logDebug("pBranchId = " + pBranchId);
798         if (isLoggingDebug())
799           logDebug("pComment = " + pComment);
800         if (isLoggingDebug())
801           logDebug("pDoCheckin = " + pDoCheckin);
802 
803         if (pProjectName == null && (pWorkspaceId == null || pBranchId == null)) {
804           if (pWorkspaceId == null) {
805             if (isLoggingError())
806               logError("Error: workspaceId required for a versioned import");
807           } else {
808             if (isLoggingError())
809               logError("Error: branchId required for a versioned import");
810           }
811           return;
812         } else if (pProjectName == null
813             && (pDoCheckin == true && pComment == null)) {
814           if (isLoggingError())
815             logError("Error: comment required for a versioned import");
816           return;
817         } else if (pProjectName != null && pUser == null) {
818           if (isLoggingError())
819             logError("Error: user required for a versioned import");
820           return;
821         } else if (pProjectName != null && pDoCheckin == true
822             && pComment == null) {
823           if (isLoggingError())
824             logError("Error: comment required for a versioned import");
825           return;
826         }
827 
828         //do importFiles
829         if (pProjectName != null) {
830           // If porjectName is supplied, we will use projectName, projectType, user to create a project and get its workspaceId.	
831           TemplateParser.importFiles(this, loadFiles, ps,
832               isImportWithTransaction(), VersioningContextUtil
833                   .versioningContextHelper(pProjectName, pProjectType, pUser,
834                       pWorkspaceId, pBranchId, pComment, pDoCheckin));
835 
836         } else {
837           // if workspaceId is supplied, we will add a random number after workspaceId to avoid duplicate.	
838           TemplateParser.importFiles(this, loadFiles, ps,
839               isImportWithTransaction(), VersioningContextUtil
840                   .versioningContextHelper(pWorkspaceId + StrictMath.random()
841                       * 10, pBranchId, pComment, pDoCheckin));
842         }
843 
844       } catch (Exception e) {
845         throw new RepositoryException(
846             "Exception caught importing files into repository.", e);
847       }
848     }
849   }
850 
851   /***
852    * This method is used to remove the 'references...' parts from
853    * sql generated by the GSA.  Removing the references allows us to
854    * avoid problems of creating tables in the wrong order and also
855    * allows you to easily drop / recreate tables.
856    */
857   private String stripReferences(String pStr) {
858     if (isLoggingDebug()) {
859       logDebug("Removing references from SQL string...");
860       if (this.getDebugLevel() > 6)
861         logDebug("SQL string before references are removed: \n" + pStr);
862     }
863 
864     pStr = stripForeignKey(pStr);
865 
866     // must be of the following format
867     // fieldname  data-type  null references foo(id),
868     String ref = "references ";
869     String endRef = ",";
870 
871     StringBuffer sb = new StringBuffer();
872     int start = 0;
873     int end = 0;
874     end = pStr.indexOf(ref);
875 
876     while (end != -1) {
877       String temp = pStr.substring(start, end);
878       sb.append(temp);
879       pStr = pStr.substring(end + ref.length());
880       start = pStr.indexOf(endRef);
881       end = pStr.indexOf(ref);
882     }
883     String temp2 = pStr.substring(start);
884     sb.append(temp2);
885 
886     if (isLoggingDebug())
887       logDebug("Final sql string -> references removed: \n" + sb.toString());
888 
889     return sb.toString();
890   }
891 
892   private String stripForeignKey(String pStr) {
893     if (isLoggingDebug()) {
894       logDebug("Removing Foreign Key from SQL string...");
895       if (this.getDebugLevel() > 6)
896         logDebug("SQL string before Foreign Key are removed: \n" + pStr);
897     }
898 
899     String key = "foreign key";
900     int flag = 0;
901     int end = 0;
902     end = pStr.toLowerCase().lastIndexOf(key);
903 
904     while (end != -1) {
905       flag = 1;
906       pStr = pStr.substring(0, end);
907       end = pStr.toLowerCase().lastIndexOf(key);
908     }
909     end = pStr.lastIndexOf(",");
910     if (flag == 0)
911       return pStr;
912     else
913       return pStr.substring(0, end) + " )";
914   }
915 
916   /*** This method is used to extract a possible CREATE INDEX statement from
917    *  a CREATE TABLE statement that is generated by a Table.  If no CREATE
918    *  INDEX statement is included, it returns null
919    */
920   private String extractIndexStatement(String pStatement) {
921     String search = "CREATE INDEX ";
922     String copy = pStatement.toUpperCase();
923     int i = copy.indexOf(search);
924     if (i != -1)
925       return stripTrailingSemiColon(pStatement.substring(i));
926 
927     return null;
928   }
929 
930   /*** This method is used to remove any possible CREATE INDEX statements from
931    *  the end of a CREATE TABLE statement generated by a Table.  It returns the
932    *  CREATE TABLE statement with all CREATE INDEX statements removed.
933    */
934   private String removeIndexStatements(String pStatement) {
935     String search = "CREATE INDEX ";
936     String copy = pStatement.toUpperCase();
937     int i = copy.indexOf(search);
938     if (i != -1)
939       pStatement = pStatement.substring(0, i);
940 
941     return stripTrailingSemiColon(pStatement);
942   }
943 
944   /*** This method is used to remove the trailing semicolon from a String.  It is assumed
945    *  that these strings will only possibly have one semicolon, and that if there is one
946    *  everything after the semicolon is junk.
947    */
948   private String stripTrailingSemiColon(String pStr) {
949     if (pStr == null)
950       return pStr;
951     int idx = pStr.indexOf(";");
952     if (idx != -1)
953       pStr = pStr.substring(0, idx);
954 
955     return pStr;
956   }
957 
958   // ---------- methods to help with user-specified SQL files -----------
959   // allowable db types to specify
960   public String SOLID = "solid";
961 
962   public String ORACLE = "oracle";
963 
964   public String MICROSOFT = "microsoft";
965 
966   public String INFORMIX = "informix";
967 
968   public String DB2 = "db2";
969 
970   public String SYBASE = "sybase";
971 
972   public String SYBASE2 = "Adaptive Server Enterprise"; // sybase 12.5
973 
974   public String DEFAULT = "default";
975 
976   private String[] dbTypes = { SOLID, ORACLE, MICROSOFT, INFORMIX, DB2, SYBASE,
977       SYBASE2, DEFAULT };
978 
979   /*** returns the dbtype for the database being used.  returned value will be one
980    *  of the constants SOLID, ORACLE, MICROSOFT, INFORMIX, DB2, SYBASE, or DEFAULT
981    *  if db type can not be determined.
982    */
983   private String getDatabaseType() {
984     String type = getDatabaseName();
985     for (int i = 0; i < dbTypes.length; i++) {
986       if (type.toLowerCase().indexOf(dbTypes[i].toLowerCase()) > -1) {
987         if (dbTypes[i].equals(SYBASE2))
988           return SYBASE;
989         return dbTypes[i];
990       }
991     }
992     return DEFAULT;
993   }
994 
995   /*** returns array of user-specified SQL files that should be executed, or null
996    *  if output from startSQLRepository should be used.
997    *  @exception RepositoryException if an error occurs getting the array of files to execute
998    */
999   private String[] getSpecifiedCreateFiles() throws RepositoryException {
1000     // try to get mapped value for this specific db type, and if it's empty try the default
1001     String files = (String) getSqlCreateFiles().get(getDatabaseType());
1002     if (files == null)
1003       files = (String) getSqlCreateFiles().get(DEFAULT);
1004     // if it's still empty then just return b/c there's nothing to execute
1005     if (files == null)
1006       return null;
1007 
1008     // if file list is not null, convert it and return the array
1009     try {
1010       return TestUtils.convertFileArray(files, ",");
1011     } catch (Exception e) {
1012       throw new RepositoryException(e);
1013     }
1014   }
1015 
1016   /*** returns array of user-specified SQL files that should be executed, or null
1017    *  if output from startSQLRepository should be used.
1018    *  @exception RepositoryException if an error occurs getting the array of files to execute
1019    */
1020   private String[] getSpecifiedDropFiles() throws RepositoryException {
1021     // try to get mapped value for this specific db type, and if it's empty try the default
1022     String files = (String) getSqlDropFiles().get(getDatabaseType());
1023     if (files == null)
1024       files = (String) getSqlDropFiles().get(DEFAULT);
1025     // if it's still empty then just return b/c there's nothing to execute
1026     if (files == null)
1027       return null;
1028 
1029     // if file list is not null, convert it and return the array
1030     try {
1031       return TestUtils.convertFileArray(files, ",");
1032     } catch (Exception e) {
1033       throw new RepositoryException(e);
1034     }
1035   }
1036 
1037 //  /*** verifies that SQL files specified by user are ok.  in particular, that if
1038 //   *  the user mapped a 'createSqlFile' for a db type there is a corresponding
1039 //   *  'dropSqlFile' entry, and vice-versa.
1040 //   *  @exception RepositoryException if anything is wrong
1041 //   */
1042 //  private void validateUserSpecifiedSqlFiles() throws RepositoryException {
1043 //    // don't let them be null
1044 //    if (getSqlCreateFiles() == null)
1045 //      setSqlCreateFiles(new Properties());
1046 //    if (getSqlDropFiles() == null)
1047 //      setSqlDropFiles(new Properties());
1048 //    // make sure all the keys are valid
1049 //    Set<Object> keys = new HashSet<Object>();
1050 //    keys.addAll(getSqlCreateFiles().keySet());
1051 //    keys.addAll(getSqlDropFiles().keySet());
1052 //    Set<String> allow_keys = new HashSet<String>();
1053 //    for (int i = 0; i < dbTypes.length; i++) {
1054 //      keys.remove(dbTypes[i]);
1055 //      if (!dbTypes[i].equals(SYBASE2))
1056 //        allow_keys.add(dbTypes[i]);
1057 //    }
1058 //    if (keys.size() > 0)
1059 //      throw new RepositoryException(
1060 //          "The following keys used in the 'sqlCreateFiles' and/or 'sqlDropFiles' properties "
1061 //              + "are invalid: " + keys + ".  Allowable keys are: " + allow_keys);
1062 //
1063 //    boolean isDefaultCreate = (getSqlCreateFiles().get(DEFAULT) != null);
1064 //    boolean isDefaultDrop = (getSqlDropFiles().get(DEFAULT) != null);
1065 //    // if there are defaults it will always be ok, so just return
1066 //    if (isDefaultCreate && isDefaultDrop)
1067 //      return;
1068 //
1069 //    // otherwise, check each dbType individually
1070 //    for (int i = 0; i < dbTypes.length; i++) {
1071 //      boolean isCreate = (getSqlCreateFiles().get(dbTypes[i]) != null);
1072 //      boolean isDrop = (getSqlDropFiles().get(dbTypes[i]) != null);
1073 //      if (isCreate && !isDrop && !isDefaultDrop)
1074 //        throw new RepositoryException(
1075 //            "Mapping exists for database type "
1076 //                + dbTypes[i]
1077 //                + " in property 'sqlCreateFiles', but not in property 'sqlDropFiles', and "
1078 //                + "there is no default specified.");
1079 //      if (isDrop && !isCreate && !isDefaultCreate)
1080 //        throw new RepositoryException(
1081 //            "Mapping exists for database type "
1082 //                + dbTypes[i]
1083 //                + " in property 'sqlDropFiles', but not in property 'sqlCreateFiles', and "
1084 //                + "there is no default specified.");
1085 //    }
1086 //  }
1087 
1088   /*** executes the specified SQL files against this Repository's DataSource.
1089    *  @param String[] the files to execute
1090    *  @param boolean true if execution should stop at first error.  if false, then a warning
1091    *  will be printed for encountered errors.
1092    *  @exception RepositoryException if pStopAtError is true and an error occurs while
1093    *  executing one of the sql statements.
1094    */
1095   private void executeSqlFiles(String[] pFiles, boolean pStopAtError)
1096       throws RepositoryException {
1097     SQLProcessor sp = new SQLProcessor(getTransactionManager(), getDataSource());
1098     // for sql server auto-commit must be true
1099     //            if ( getDatabaseType().equals( MICROSOFT ) ) sp.setAutoCommit(true);
1100     SQLFileParser parser = new SQLFileParser();
1101     for (int i = 0; i < pFiles.length; i++) {
1102       String file = pFiles[i];
1103       // switch the file path so everything is forward slashes
1104       file = file.replace('//', '/');
1105       String cmd = null;
1106       Iterator<?> cmds = null;
1107       if (isLoggingInfo())
1108         logInfo("Executing SQL file: " + file);
1109       if (!new File(file).exists())
1110         throw new RepositoryException("SQL file " + file + " does not exist.");
1111 
1112       // parse the file to get commands...
1113       try {
1114         Collection<?> c = parser.parseSQLFile(file);
1115         if (isLoggingDebug())
1116           logDebug("Parsed " + c.size() + " SQL command(s) from file.");
1117         cmds = c.iterator();
1118       } catch (Exception e) {
1119         // an error parsing the file indicates something very wrong, so bail
1120         throw new RepositoryException("Error encountered parsing SQL file "
1121             + file, e);
1122       }
1123 
1124       // then execute the commands...
1125       while (cmds.hasNext()) {
1126         cmd = (String) cmds.next();
1127         if (isLoggingDebug() || isLoggingCreateTables())
1128           logDebug("Executing SQL cmd [" + cmd + "]");
1129         try {
1130           sp.executeSQL(cmd);
1131         } catch (Exception e) {
1132           if (pStopAtError) {
1133             throw new RepositoryException("Error received executing command ["
1134                 + cmd + "] from SQL file " + file, e);
1135           } else {
1136             if (isLoggingWarning())
1137               logWarning("Error received executing command [" + cmd
1138                   + "] from SQL file " + file + ": " + e.getMessage());
1139           }
1140         }
1141       }
1142     }
1143   }
1144 
1145 } // end of class
1146