From 566fb78ef39c3194d9fc7236255531ccc3a3bcfa Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 10:30:17 +0800 Subject: [PATCH 01/15] Initial import * add Pentaho CE 6.1.0.1-196 source code * setup maven build environment --- .gitignore | 13 + pentaho-kettle/pom.xml | 75 + .../di/core/database/util/DatabaseUtil.java | 196 + .../di/core/logging/LogChannelFileWriter.java | 216 + .../di/core/logging/LoggingRegistry.java | 250 + .../src/main/java/org/pentaho/di/job/Job.java | 2496 +++++++ .../main/java/org/pentaho/di/job/JobMeta.java | 2853 ++++++++ .../di/job/entries/job/JobEntryJob.java | 1567 +++++ .../di/job/entries/trans/JobEntryTrans.java | 1563 ++++ .../pur/LazyUnifiedRepositoryDirectory.java | 348 + .../org/pentaho/di/resource/ResourceUtil.java | 185 + .../org/pentaho/di/trans/steps/Trans.java | 5580 +++++++++++++++ .../org/pentaho/di/trans/steps/TransMeta.java | 6265 +++++++++++++++++ .../di/trans/steps/append/AppendMeta.java | 215 + .../steps/filterrows/FilterRowsMeta.java | 426 ++ .../steps/javafilter/JavaFilterMeta.java | 270 + .../trans/steps/mergejoin/MergeJoinMeta.java | 329 + .../trans/steps/mergerows/MergeRowsMeta.java | 358 + .../steps/tableinput/TableInputMeta.java | 572 ++ .../main/java/org/pentaho/di/www/Carte.java | 391 + .../org/pentaho/di/www/CarteSingleton.java | 297 + .../java/org/pentaho/di/www/WebServer.java | 521 ++ .../src/main/resources/kettle-servlets.xml | 63 + pentaho-platform/pom.xml | 62 + .../quartz/ActionAdapterQuartzJob.java | 451 ++ .../scheduler2/quartz/QuartzScheduler.java | 842 +++ pom.xml | 74 + 27 files changed, 26478 insertions(+) create mode 100644 pentaho-kettle/pom.xml create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java create mode 100644 
pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java create mode 100644 pentaho-kettle/src/main/resources/kettle-servlets.xml create mode 100644 pentaho-platform/pom.xml create mode 100644 pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java create mode 100644 pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java create mode 100644 pom.xml diff --git a/.gitignore b/.gitignore index 32858aa..119761f 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,16 @@ # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml hs_err_pid* + +# IDEA / Eclipse / Maven +target/ +testing/ +.settings +.classpath +.project +doc +docs +notes +.DS_Store +/.idea +*.iml \ No newline at end of file diff --git a/pentaho-kettle/pom.xml b/pentaho-kettle/pom.xml new file mode 100644 index 0000000..28b1de3 --- /dev/null +++ b/pentaho-kettle/pom.xml @@ -0,0 +1,75 @@ + + 4.0.0 + + com.github.zhicwu + pdi-cluster + 6.1.0.1-SNAPSHOT + + pentaho-kettle + jar + Pentaho Kettle CE + Workarounds for Pentaho Kettle Community Edition. + https://github.com/zhicwu/pdi-cluster + + + ${project.parent.basedir} + 3.1.0 + 1.16 + + + + + pentaho + pentaho-platform-api + ${pentaho-ce.version} + + + pentaho + pentaho-platform-extensions + ${pentaho-ce.version} + + + + org.apache.xbean + xbean + + + + + pentaho-kettle + kettle-core + ${pentaho-ce.version} + + + pentaho-kettle + kettle-engine + ${pentaho-ce.version} + + + pentaho-kettle + pdi-pur-plugin + ${pentaho-ce.version} + + + + + Apache 2 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + Apache License Version 2.0 + + + + + scm:git:git@github.com:zhicwu/pdi-cluster.git + scm:git:git@github.com:zhicwu/pdi-cluster.git + https://github.com/zhicwu/pdi-cluster + ${project.version} + + + + + Zhichun Wu + + + \ No newline at end of file diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java new file mode 100644 index 0000000..b2bbed3 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java @@ -0,0 +1,196 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.core.database.util; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import javax.naming.InitialContext; +import javax.naming.NamingException; +import javax.sql.DataSource; + +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.DataSourceNamingException; +import org.pentaho.di.core.database.DataSourceProviderInterface; +import org.pentaho.di.core.database.Database; +import org.pentaho.di.i18n.BaseMessages; + +import javax.naming.Context; + +/** + * Provides default implementation for looking data sources up in JNDI. + * + * @author mbatchel + */ + +public class DatabaseUtil implements DataSourceProviderInterface { + private static Class PKG = Database.class; // for i18n purposes, needed by Translator2!! + private static Map FoundDS = Collections.synchronizedMap( new HashMap() ); + + /** + * Clears cache of DataSources (For Unit test) + */ + protected static void clearDSCache() { + FoundDS.clear(); + } + + /** + * Since JNDI is supported different ways in different app servers, it's nearly impossible to have a ubiquitous way to + * look up a datasource. This method is intended to hide all the lookups that may be required to find a jndi name. + * + * @param dsName The Datasource name + * @return DataSource if there is one bound in JNDI + * @throws NamingException + */ + protected static DataSource getDataSourceFromJndi( String dsName, Context ctx ) throws NamingException { + if ( Const.isEmpty( dsName ) ) { + throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", String.valueOf( dsName ) ) ); + } + Object foundDs = FoundDS.get( dsName ); + if ( foundDs != null ) { + return (DataSource) foundDs; + } + Object lkup = null; + DataSource rtn = null; + NamingException firstNe = null; + // First, try what they ask for... + try { + lkup = ctx.lookup( dsName ); + if ( lkup instanceof DataSource ) { + rtn = (DataSource) lkup; + FoundDS.put( dsName, rtn ); + return rtn; + } + } catch ( NamingException ignored ) { + firstNe = ignored; + } + try { + // Needed this for Jboss + lkup = ctx.lookup( "java:" + dsName ); + if ( lkup instanceof DataSource ) { + rtn = (DataSource) lkup; + FoundDS.put( dsName, rtn ); + return rtn; + } + } catch ( NamingException ignored ) { + // ignore + } + try { + // Tomcat + lkup = ctx.lookup( "java:comp/env/jdbc/" + dsName ); + if ( lkup instanceof DataSource ) { + rtn = (DataSource) lkup; + FoundDS.put( dsName, rtn ); + return rtn; + } + } catch ( NamingException ignored ) { + // ignore + } + try { + // Others? 
+ lkup = ctx.lookup( "jdbc/" + dsName ); + if ( lkup instanceof DataSource ) { + rtn = (DataSource) lkup; + FoundDS.put( dsName, rtn ); + return rtn; + } + } catch ( NamingException ignored ) { + // ignore + } + if ( firstNe != null ) { + throw firstNe; + } + throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", dsName ) ); + } + + public static void closeSilently( Connection[] connections ) { + if ( connections == null || connections.length == 0 ) { + return; + } + for ( Connection conn : connections ) { + closeSilently( conn ); + } + } + + public static void closeSilently( Connection conn ) { + if ( conn == null ) { + return; + } + try { + conn.close(); + } catch ( Throwable e ) { + // omit + } + } + + public static void closeSilently( Statement[] statements ) { + if ( statements == null || statements.length == 0 ) { + return; + } + for ( Statement st : statements ) { + closeSilently( st ); + } + } + + public static void closeSilently( Statement st ) { + if ( st == null ) { + return; + } + try { + st.close(); + } catch ( Throwable e ) { + // omit + } + } + + + /** + * Implementation of DatasourceProviderInterface. + */ + @Override + public DataSource getNamedDataSource( String datasourceName ) throws DataSourceNamingException { + try { + return DatabaseUtil.getDataSourceFromJndi( datasourceName, new InitialContext() ); + } catch ( NamingException ex ) { + throw new DataSourceNamingException( ex ); + } + } + + @Override + public DataSource getNamedDataSource( String datasourceName, DatasourceType type ) + throws DataSourceNamingException { + if ( type != null ) { + switch( type ) { + case JNDI: + return getNamedDataSource( datasourceName ); + case POOLED: + throw new UnsupportedOperationException( + getClass().getName() + " does not support providing pooled data sources" ); + } + } + throw new IllegalArgumentException( "Unsupported data source type: " + type ); + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java new file mode 100644 index 0000000..acf37c9 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java @@ -0,0 +1,216 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.core.logging; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.vfs2.FileObject; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.vfs.KettleVFS; + +/** + * This class takes care of polling the central log store for new log messages belonging to a certain log channel ID + * (and children). The log lines will be written to a logging file. + * + * @author matt + * + */ +public class LogChannelFileWriter { + private String logChannelId; + private FileObject logFile; + private boolean appending; + private int pollingInterval; + + private AtomicBoolean active; + private KettleException exception; + private int lastBufferLineNr; + protected OutputStream logFileOutputStream; + + /** + * Create a new log channel file writer + * + * @param logChannelId + * The log channel (+children) to write to the log file + * @param logFile + * The logging file to write to + * @param appending + * set to true if you want to append to an existing file + * @param pollingInterval + * The polling interval in milliseconds. + * + * @throws KettleException + * in case the specified log file can't be created. + */ + public LogChannelFileWriter( String logChannelId, FileObject logFile, boolean appending, int pollingInterval ) throws KettleException { + this.logChannelId = logChannelId; + this.logFile = logFile; + this.appending = appending; + this.pollingInterval = pollingInterval; + + active = new AtomicBoolean( false ); + lastBufferLineNr = KettleLogStore.getLastBufferLineNr(); + + try { + logFileOutputStream = KettleVFS.getOutputStream( logFile, appending ); + } catch ( IOException e ) { + throw new KettleException( "There was an error while trying to open file '" + logFile + "' for writing", e ); + } + } + + /** + * Create a new log channel file writer + * + * @param logChannelId + * The log channel (+children) to write to the log file + * @param logFile + * The logging file to write to + * @param appending + * set to true if you want to append to an existing file + * + * @throws KettleException + * in case the specified log file can't be created. + */ + public LogChannelFileWriter( String logChannelId, FileObject logFile, boolean appending ) throws KettleException { + this( logChannelId, logFile, appending, 1000 ); + } + + /** + * Start the logging thread which will write log data from the specified log channel to the log file. In case of an + * error, the exception will be available with method getException(). + */ + public void startLogging() { + + exception = null; + active.set( true ); + + Thread thread = new Thread( new Runnable() { + public void run() { + try { + + while ( active.get() && exception == null ) { + flush(); + Thread.sleep( pollingInterval ); + } + // When done, save the last bit as well... 
+ flush(); + + } catch ( Exception e ) { + exception = new KettleException( "There was an error logging to file '" + logFile + "'", e ); + } finally { + try { + if ( logFileOutputStream != null ) { + logFileOutputStream.close(); + logFileOutputStream = null; + } + } catch ( Exception e ) { + exception = new KettleException( "There was an error closing log file file '" + logFile + "'", e ); + } + } + } + } ); + thread.start(); + } + + public synchronized void flush() { + try { + int last = KettleLogStore.getLastBufferLineNr(); + StringBuffer buffer = KettleLogStore.getAppender().getBuffer( logChannelId, false, lastBufferLineNr, last ); + logFileOutputStream.write( buffer.toString().getBytes() ); + lastBufferLineNr = last; + logFileOutputStream.flush(); + } catch ( Exception e ) { + exception = new KettleException( "There was an error logging to file '" + logFile + "'", e ); + } + } + + public void stopLogging() { + flush(); + active.set( false ); + } + + public KettleException getException() { + return exception; + } + + /** + * @return the logChannelId + */ + public String getLogChannelId() { + return logChannelId; + } + + /** + * @param logChannelId + * the logChannelId to set + */ + public void setLogChannelId( String logChannelId ) { + this.logChannelId = logChannelId; + } + + /** + * @return the logFile + */ + public FileObject getLogFile() { + return logFile; + } + + /** + * @param logFile + * the logFile to set + */ + public void setLogFile( FileObject logFile ) { + this.logFile = logFile; + } + + /** + * @return the appending + */ + public boolean isAppending() { + return appending; + } + + /** + * @param appending + * the appending to set + */ + public void setAppending( boolean appending ) { + this.appending = appending; + } + + /** + * @return the pollingInterval + */ + public int getPollingInterval() { + return pollingInterval; + } + + /** + * @param pollingInterval + * the pollingInterval to set + */ + public void setPollingInterval( int pollingInterval ) { + this.pollingInterval = pollingInterval; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java new file mode 100644 index 0000000..00d906d --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java @@ -0,0 +1,250 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2015 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.core.logging; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; + +import org.pentaho.di.core.Const; +import org.pentaho.di.core.util.EnvUtil; + +public class LoggingRegistry { + private static LoggingRegistry registry = new LoggingRegistry(); + private Map map; + private Map> childrenMap; + private Date lastModificationTime; + private int maxSize; + private final int DEFAULT_MAX_SIZE = 10000; + + private Object syncObject = new Object(); + + private LoggingRegistry() { + this.map = new ConcurrentHashMap(); + this.childrenMap = new ConcurrentHashMap>(); + + this.lastModificationTime = new Date(); + this.maxSize = Const.toInt( EnvUtil.getSystemProperty( "KETTLE_MAX_LOGGING_REGISTRY_SIZE" ), DEFAULT_MAX_SIZE ); + } + + public static LoggingRegistry getInstance() { + return registry; + } + + public String registerLoggingSource( Object object ) { + synchronized ( this.syncObject ) { + + this.maxSize = Const.toInt( EnvUtil.getSystemProperty( "KETTLE_MAX_LOGGING_REGISTRY_SIZE" ), 10000 ); + + LoggingObject loggingSource = new LoggingObject( object ); + + LoggingObjectInterface found = findExistingLoggingSource( loggingSource ); + if ( found != null ) { + LoggingObjectInterface foundParent = found.getParent(); + LoggingObjectInterface loggingSourceParent = loggingSource.getParent(); + if ( foundParent != null && loggingSourceParent != null ) { + String foundParentLogChannelId = foundParent.getLogChannelId(); + String sourceParentLogChannelId = loggingSourceParent.getLogChannelId(); + if ( foundParentLogChannelId != null && sourceParentLogChannelId != null + && foundParentLogChannelId.equals( sourceParentLogChannelId ) ) { + String foundLogChannelId = found.getLogChannelId(); + if ( foundLogChannelId != null ) { + return foundLogChannelId; + } + } + } + } + + String logChannelId = UUID.randomUUID().toString(); + loggingSource.setLogChannelId( logChannelId ); + + this.map.put( logChannelId, loggingSource ); + + if ( loggingSource.getParent() != null ) { + String parentLogChannelId = loggingSource.getParent().getLogChannelId(); + if ( parentLogChannelId != null ) { + List parentChildren = this.childrenMap.get( parentLogChannelId ); + if ( parentChildren == null ) { + parentChildren = new ArrayList(); + this.childrenMap.put( parentLogChannelId, parentChildren ); + } + parentChildren.add( logChannelId ); + } + } + + this.lastModificationTime = new Date(); + loggingSource.setRegistrationDate( this.lastModificationTime ); + + if ( ( this.maxSize > 0 ) && ( this.map.size() > this.maxSize ) ) { + List all = new ArrayList( this.map.values() ); + Collections.sort( all, new Comparator() { + @Override + public int compare( LoggingObjectInterface o1, LoggingObjectInterface o2 ) { + if ( ( o1 == null ) && ( o2 != null ) ) { + return -1; + } + if ( ( o1 != null ) && ( o2 == null ) ) { + return 1; + } + if ( ( o1 == null ) && ( o2 == null ) ) { + return 0; + } + if ( o1.getRegistrationDate() == null && o2.getRegistrationDate() != null ) { + return -1; + } + if ( o1.getRegistrationDate() != null && o2.getRegistrationDate() == null ) { + return 1; + } + if ( o1.getRegistrationDate() == null && o2.getRegistrationDate() == null ) { + return 0; + } + return ( o1.getRegistrationDate().compareTo( 
o2.getRegistrationDate() ) ); + } + } ); + int cutCount = this.maxSize < 1000 ? this.maxSize : 1000; + for ( int i = 0; i < cutCount; i++ ) { + LoggingObjectInterface toRemove = all.get( i ); + this.map.remove( toRemove.getLogChannelId() ); + } + removeOrphans(); + } + return logChannelId; + } + } + + public LoggingObjectInterface findExistingLoggingSource( LoggingObjectInterface loggingObject ) { + LoggingObjectInterface found = null; + for ( LoggingObjectInterface verify : this.map.values() ) { + if ( loggingObject.equals( verify ) ) { + found = verify; + break; + } + } + return found; + } + + public LoggingObjectInterface getLoggingObject( String logChannelId ) { + return this.map.get( logChannelId ); + } + + public Map getMap() { + return this.map; + } + + public List getLogChannelChildren( String parentLogChannelId ) { + if ( parentLogChannelId == null ) { + return null; + } + List list = getLogChannelChildren( new ArrayList(), parentLogChannelId ); + list.add( parentLogChannelId ); + return list; + } + + private List getLogChannelChildren( List children, String parentLogChannelId ) { + synchronized ( this.syncObject ) { + List list = this.childrenMap.get( parentLogChannelId ); + if ( list == null ) { + // Don't do anything, just return the input. + return children; + } + + Iterator kids = list.iterator(); + while ( kids.hasNext() ) { + String logChannelId = kids.next(); + + // Add the children recursively + getLogChannelChildren( children, logChannelId ); + + // Also add the current parent + children.add( logChannelId ); + } + } + + return children; + } + + public Date getLastModificationTime() { + return this.lastModificationTime; + } + + public String dump( boolean includeGeneral ) { + StringBuffer out = new StringBuffer( 50000 ); + for ( LoggingObjectInterface o : this.map.values() ) { + if ( ( includeGeneral ) || ( !o.getObjectType().equals( LoggingObjectType.GENERAL ) ) ) { + out.append( o.getContainerObjectId() ); + out.append( "\t" ); + out.append( o.getLogChannelId() ); + out.append( "\t" ); + out.append( o.getObjectType().name() ); + out.append( "\t" ); + out.append( o.getObjectName() ); + out.append( "\t" ); + out.append( o.getParent() != null ? o.getParent().getLogChannelId() : "-" ); + out.append( "\t" ); + out.append( o.getParent() != null ? o.getParent().getObjectType().name() : "-" ); + out.append( "\t" ); + out.append( o.getParent() != null ? o.getParent().getObjectName() : "-" ); + out.append( "\n" ); + } + } + return out.toString(); + } + + /** + * For junit testing purposes + * @return ro items map + */ + Map dumpItems() { + return Collections.unmodifiableMap( this.map ); + } + + /** + * For junit testing purposes + * @return ro parent-child relations map + */ + Map> dumpChildren() { + return Collections.unmodifiableMap( this.childrenMap ); + } + + public void removeIncludingChildren( String logChannelId ) { + synchronized ( this.map ) { + List children = getLogChannelChildren( logChannelId ); + for ( String child : children ) { + this.map.remove( child ); + } + this.map.remove( logChannelId ); + removeOrphans(); + } + } + + public void removeOrphans() { + // Remove all orphaned children + this.childrenMap.keySet().retainAll( this.map.keySet() ); + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java new file mode 100644 index 0000000..760970e --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java @@ -0,0 +1,2496 @@ +//CHECKSTYLE:FileLength:OFF +/*! 
****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.job; + +import java.net.URLEncoder; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.vfs2.FileName; +import org.apache.commons.vfs2.FileObject; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.ExecutorInterface; +import org.pentaho.di.core.ExtensionDataInterface; +import org.pentaho.di.core.KettleEnvironment; +import org.pentaho.di.core.Result; +import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.database.Database; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleJobException; +import org.pentaho.di.core.exception.KettleValueException; +import org.pentaho.di.core.extension.ExtensionPointHandler; +import org.pentaho.di.core.extension.KettleExtensionPoint; +import org.pentaho.di.core.gui.JobTracker; +import org.pentaho.di.core.logging.ChannelLogTable; +import org.pentaho.di.core.logging.DefaultLogLevel; +import org.pentaho.di.core.logging.HasLogChannelInterface; +import org.pentaho.di.core.logging.JobEntryLogTable; +import org.pentaho.di.core.logging.JobLogTable; +import org.pentaho.di.core.logging.KettleLogStore; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.logging.LogLevel; +import org.pentaho.di.core.logging.LogStatus; +import org.pentaho.di.core.logging.LoggingBuffer; +import org.pentaho.di.core.logging.LoggingHierarchy; +import org.pentaho.di.core.logging.LoggingObjectInterface; +import org.pentaho.di.core.logging.LoggingObjectType; +import org.pentaho.di.core.logging.LoggingRegistry; +import org.pentaho.di.core.logging.Metrics; +import org.pentaho.di.core.parameters.DuplicateParamException; +import org.pentaho.di.core.parameters.NamedParams; +import org.pentaho.di.core.parameters.NamedParamsDefault; +import 
org.pentaho.di.core.parameters.UnknownParamException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMeta; +import org.pentaho.di.core.util.EnvUtil; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.variables.Variables; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.job.entries.job.JobEntryJob; +import org.pentaho.di.job.entries.special.JobEntrySpecial; +import org.pentaho.di.job.entries.trans.JobEntryTrans; +import org.pentaho.di.job.entry.JobEntryCopy; +import org.pentaho.di.job.entry.JobEntryInterface; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.ObjectRevision; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.repository.RepositoryDirectoryInterface; +import org.pentaho.di.resource.ResourceUtil; +import org.pentaho.di.resource.TopLevelResource; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.www.AddExportServlet; +import org.pentaho.di.www.RegisterJobServlet; +import org.pentaho.di.www.SocketRepository; +import org.pentaho.di.www.StartJobServlet; +import org.pentaho.di.www.WebResult; +import org.pentaho.metastore.api.IMetaStore; + +/** + * This class executes a job as defined by a JobMeta object. + *

+ * The definition of a PDI job is represented by a JobMeta object. It is typically loaded from a .kjb file, a PDI + * repository, or it is generated dynamically. The declared parameters of the job definition are then queried using + * listParameters() and assigned values using calls to setParameterValue(..). + * + * @author Matt Casters + * @since 07-apr-2003 + * + */ +public class Job extends Thread implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface, + ExecutorInterface, ExtensionDataInterface { + private static Class PKG = Job.class; // for i18n purposes, needed by Translator2!! + + public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml"; + + private LogChannelInterface log; + + private LogLevel logLevel = DefaultLogLevel.getLogLevel(); + + private String containerObjectId; + + private JobMeta jobMeta; + + private int logCommitSize = 10; + + private Repository rep; + + private AtomicInteger errors; + + private VariableSpace variables = new Variables(); + + /** + * The job that's launching this (sub-) job. This gives us access to the whole chain, including the parent variables, + * etc. + */ + protected Job parentJob; + + /** + * The parent transformation + */ + protected Trans parentTrans; + + /** The parent logging interface to reference */ + private LoggingObjectInterface parentLoggingObject; + + /** + * Keep a list of the job entries that were executed. org.pentaho.di.core.logging.CentralLogStore.getInstance() + */ + private JobTracker jobTracker; + + /** + * A flat list of results in THIS job, in the order of execution of job entries + */ + private final LinkedList jobEntryResults = new LinkedList(); + + private Date startDate, endDate, currentDate, logDate, depDate; + + private AtomicBoolean active; + + private AtomicBoolean stopped; + + private long batchId; + + /** + * This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the job's batch + * id + */ + private long passedBatchId; + + /** + * The rows that were passed onto this job by a previous transformation. These rows are passed onto the first job + * entry in this job (on the result object) + */ + private List sourceRows; + + /** + * The result of the job, after execution. + */ + private Result result; + + private AtomicBoolean initialized; + + private boolean interactive; + + private List jobListeners; + + private List jobEntryListeners; + + private List delegationListeners; + + private Map activeJobEntryTransformations; + + private Map activeJobEntryJobs; + + /** + * Parameters of the job. + */ + private NamedParams namedParams = new NamedParamsDefault(); + + private AtomicBoolean finished; + + private SocketRepository socketRepository; + + private int maxJobEntriesLogged; + + private JobEntryCopy startJobEntryCopy; + private Result startJobEntryResult; + + private String executingServer; + + private String executingUser; + + private String transactionId; + + private Map extensionDataMap; + + /** The command line arguments for the job. */ + protected String[] arguments; + + /** + * Instantiates a new job. 
+ * + * @param name + * the name + * @param file + * the file + * @param args + * the args + */ + public Job( String name, String file, String[] args ) { + this(); + jobMeta = new JobMeta(); + + if ( name != null ) { + setName( name + " (" + super.getName() + ")" ); + } + jobMeta.setName( name ); + jobMeta.setFilename( file ); + this.arguments = args; + + init(); + this.log = new LogChannel( this ); + } + + /** + * Initializes the Job. + */ + public void init() { + jobListeners = new ArrayList(); + jobEntryListeners = new ArrayList(); + delegationListeners = new ArrayList(); + + activeJobEntryTransformations = new HashMap(); + activeJobEntryJobs = new HashMap(); + + extensionDataMap = new HashMap(); + + active = new AtomicBoolean( false ); + stopped = new AtomicBoolean( false ); + jobTracker = new JobTracker( jobMeta ); + synchronized ( jobEntryResults ) { + jobEntryResults.clear(); + } + initialized = new AtomicBoolean( false ); + finished = new AtomicBoolean( false ); + errors = new AtomicInteger( 0 ); + batchId = -1; + passedBatchId = -1; + maxJobEntriesLogged = Const.toInt( EnvUtil.getSystemProperty( Const.KETTLE_MAX_JOB_ENTRIES_LOGGED ), 1000 ); + + result = null; + startJobEntryCopy = null; + startJobEntryResult = null; + + this.setDefaultLogCommitSize(); + } + + /** + * Sets the default log commit size. + */ + private void setDefaultLogCommitSize() { + String propLogCommitSize = this.getVariable( "pentaho.log.commit.size" ); + if ( propLogCommitSize != null ) { + // override the logCommit variable + try { + logCommitSize = Integer.parseInt( propLogCommitSize ); + } catch ( Exception ignored ) { + logCommitSize = 10; // ignore parsing error and default to 10 + } + } + } + + /** + * Instantiates a new job. + * + * @param repository + * the repository + * @param jobMeta + * the job meta + */ + public Job( Repository repository, JobMeta jobMeta ) { + this( repository, jobMeta, null ); + } + + /** + * Instantiates a new job. + * + * @param repository + * the repository + * @param jobMeta + * the job meta + * @param parentLogging + * the parent logging + */ + public Job( Repository repository, JobMeta jobMeta, LoggingObjectInterface parentLogging ) { + this.rep = repository; + this.jobMeta = jobMeta; + this.parentLoggingObject = parentLogging; + + init(); + + jobTracker = new JobTracker( jobMeta ); + + this.log = new LogChannel( this, parentLogging ); + this.logLevel = log.getLogLevel(); + this.containerObjectId = log.getContainerObjectId(); + } + + /** + * Empty constructor, for Class.newInstance() + */ + public Job() { + init(); + this.log = new LogChannel( this ); + this.logLevel = log.getLogLevel(); + } + + /** + * Gets the name property of the JobMeta property. + * + * @return String name for the JobMeta + */ + @Override + public String toString() { + if ( jobMeta == null || Const.isEmpty( jobMeta.getName() ) ) { + return getName(); + } else { + return jobMeta.getName(); + } + } + + /** + * Creates the job with new class loader. + * + * @return the job + * @throws KettleException + * the kettle exception + */ + public static final Job createJobWithNewClassLoader() throws KettleException { + try { + // Load the class. + Class jobClass = Const.createNewClassLoader().loadClass( Job.class.getName() ); + + // create the class + // Try to instantiate this one... + Job job = (Job) jobClass.newInstance(); + + // Done! 
+ return job; + } catch ( Exception e ) { + String message = BaseMessages.getString( PKG, "Job.Log.ErrorAllocatingNewJob", e.toString() ); + throw new KettleException( message, e ); + } + } + + /** + * Gets the jobname. + * + * @return the jobname + */ + public String getJobname() { + if ( jobMeta == null ) { + return null; + } + + return jobMeta.getName(); + } + + /** + * Sets the repository. + * + * @param rep + * the new repository + */ + public void setRepository( Repository rep ) { + this.rep = rep; + } + + /** + * Threads main loop: called by Thread.start(); + */ + public void run() { + + ExecutorService heartbeat = null; // this job's heartbeat scheduled executor + + try { + stopped = new AtomicBoolean( false ); + finished = new AtomicBoolean( false ); + initialized = new AtomicBoolean( true ); + + // Create a new variable name space as we want jobs to have their own set of variables. + // initialize from parentJob or null + // + variables.initializeVariablesFrom( parentJob ); + setInternalKettleVariables( variables ); + copyParametersFrom( jobMeta ); + activateParameters(); + + // Run the job + // + fireJobStartListeners(); + + heartbeat = startHeartbeat( getHeartbeatIntervalInSeconds() ); + + result = execute(); + } catch ( Throwable je ) { + log.logError( BaseMessages.getString( PKG, "Job.Log.ErrorExecJob", je.getMessage() ), je ); + // log.logError(Const.getStackTracker(je)); + // + // we don't have result object because execute() threw a curve-ball. + // So we create a new error object. + // + result = new Result(); + result.setNrErrors( 1L ); + result.setResult( false ); + addErrors( 1 ); // This can be before actual execution + + emergencyWriteJobTracker( result ); + + active.set( false ); + finished.set( true ); + stopped.set( false ); + } finally { + try { + + shutdownHeartbeat( heartbeat ); + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobFinish.id, this ); + + fireJobFinishListeners(); + } catch ( KettleException e ) { + result.setNrErrors( 1 ); + result.setResult( false ); + log.logError( BaseMessages.getString( PKG, "Job.Log.ErrorExecJob", e.getMessage() ), e ); + + emergencyWriteJobTracker( result ); + } + } + } + + private void emergencyWriteJobTracker( Result res ) { + JobEntryResult jerFinalResult = + new JobEntryResult( res, this.getLogChannelId(), BaseMessages.getString( PKG, "Job.Comment.JobFinished" ), null, + null, 0, null ); + JobTracker finalTrack = new JobTracker( this.getJobMeta(), jerFinalResult ); + // jobTracker is up to date too. + this.jobTracker.addJobTracker( finalTrack ); + } + + /** + * Execute a job without previous results. This is a job entry point (not recursive)
+ *
+ * + * @return the result of the execution + * + * @throws KettleException + */ + private Result execute() throws KettleException { + try { + log.snap( Metrics.METRIC_JOB_START ); + + finished.set( false ); + stopped.set( false ); + KettleEnvironment.setExecutionInformation( this, rep ); + + log.logMinimal( BaseMessages.getString( PKG, "Job.Comment.JobStarted" ) ); + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobStart.id, this ); + + // Start the tracking... + JobEntryResult jerStart = + new JobEntryResult( null, null, BaseMessages.getString( PKG, "Job.Comment.JobStarted" ), BaseMessages + .getString( PKG, "Job.Reason.Started" ), null, 0, null ); + jobTracker.addJobTracker( new JobTracker( jobMeta, jerStart ) ); + + active.set( true ); + + // Where do we start? + JobEntryCopy startpoint; + + // synchronize this to a parent job if needed. + // + Object syncObject = this; + if ( parentJob != null ) { + syncObject = parentJob; // parallel execution in a job + } + + synchronized ( syncObject ) { + beginProcessing(); + } + + Result res = null; + + if ( startJobEntryCopy == null ) { + startpoint = jobMeta.findJobEntry( JobMeta.STRING_SPECIAL_START, 0, false ); + } else { + startpoint = startJobEntryCopy; + res = startJobEntryResult; + } + if ( startpoint == null ) { + throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.CounldNotFindStartingPoint" ) ); + } + + JobEntryResult jerEnd = null; + + if ( startpoint.isStart() ) { + // Perform optional looping in the special Start job entry... + // + // long iteration = 0; + + boolean isFirst = true; + JobEntrySpecial jes = (JobEntrySpecial) startpoint.getEntry(); + while ( ( jes.isRepeat() || isFirst ) && !isStopped() ) { + isFirst = false; + res = execute( 0, null, startpoint, null, BaseMessages.getString( PKG, "Job.Reason.Started" ) ); + + // + // if (iteration > 0 && (iteration % 500) == 0) { + // System.out.println("other 500 iterations: " + iteration); + // } + + // iteration++; + // + } + jerEnd = + new JobEntryResult( res, jes.getLogChannelId(), BaseMessages.getString( PKG, "Job.Comment.JobFinished" ), + BaseMessages.getString( PKG, "Job.Reason.Finished" ), null, 0, null ); + } else { + res = execute( 0, res, startpoint, null, BaseMessages.getString( PKG, "Job.Reason.Started" ) ); + jerEnd = + new JobEntryResult( res, startpoint.getEntry().getLogChannel().getLogChannelId(), BaseMessages.getString( + PKG, "Job.Comment.JobFinished" ), BaseMessages.getString( PKG, "Job.Reason.Finished" ), null, 0, null ); + } + // Save this result... + jobTracker.addJobTracker( new JobTracker( jobMeta, jerEnd ) ); + log.logMinimal( BaseMessages.getString( PKG, "Job.Comment.JobFinished" ) ); + + active.set( false ); + finished.set( true ); + + return res; + } finally { + log.snap( Metrics.METRIC_JOB_STOP ); + } + } + + /** + * Execute a job with previous results passed in.
+ *
+ * This variant is called by JobEntryJob: it does not clear the jobEntryResults. + * + * @param nr + * The job entry number + * @param result + * the result of the previous execution + * @return Result of the job execution + * @throws KettleJobException + */ + public Result execute( int nr, Result result ) throws KettleException { + finished.set( false ); + active.set( true ); + initialized.set( true ); + KettleEnvironment.setExecutionInformation( this, rep ); + + // Where do we start? + JobEntryCopy startpoint; + + // Perhaps there is already a list of input rows available? + if ( getSourceRows() != null ) { + result.setRows( getSourceRows() ); + } + + startpoint = jobMeta.findJobEntry( JobMeta.STRING_SPECIAL_START, 0, false ); + if ( startpoint == null ) { + throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.CounldNotFindStartingPoint" ) ); + } + + Result res = execute( nr, result, startpoint, null, BaseMessages.getString( PKG, "Job.Reason.StartOfJobentry" ) ); + + active.set( false ); + + return res; + } + + /** + * Sets the finished flag, then launches all the job listeners and calls the jobFinished method for each.
+ * + * @see JobListener#jobFinished(Job) + */ + public void fireJobFinishListeners() throws KettleException { + synchronized ( jobListeners ) { + for ( JobListener jobListener : jobListeners ) { + jobListener.jobFinished( this ); + } + } + } + + /** + * Call the jobStarted method for each listener.
+ * + * @see JobListener#jobStarted(Job) + */ + public void fireJobStartListeners() throws KettleException { + synchronized ( jobListeners ) { + for ( JobListener jobListener : jobListeners ) { + jobListener.jobStarted( this ); + } + } + } + + /** + * Execute a job entry recursively and move to the next job entry automatically.
+ * Uses a back-tracking algorithm.
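+ * (The entry itself is executed first; every outgoing hop that is unconditional or whose evaluation matches the entry's result is then followed, either serially or, when the entry is set to launch in parallel, in separate threads whose results are joined and aggregated before returning.)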
+ * + * @param nr + * @param prev_result + * @param jobEntryCopy + * @param previous + * @param reason + * @return + * @throws KettleException + */ + private Result execute( final int nr, Result prev_result, final JobEntryCopy jobEntryCopy, JobEntryCopy previous, + String reason ) throws KettleException { + Result res = null; + + if ( stopped.get() ) { + res = new Result( nr ); + res.stopped = true; + return res; + } + + // if we didn't have a previous result, create one, otherwise, copy the content... + // + final Result newResult; + Result prevResult = null; + if ( prev_result != null ) { + prevResult = prev_result.clone(); + } else { + prevResult = new Result(); + } + + JobExecutionExtension extension = new JobExecutionExtension( this, prevResult, jobEntryCopy, true ); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobBeforeJobEntryExecution.id, extension ); + + if ( extension.result != null ) { + prevResult = extension.result; + } + + if ( !extension.executeEntry ) { + newResult = prevResult; + } else { + if ( log.isDetailed() ) { + log.logDetailed( "exec(" + nr + ", " + ( prev_result != null ? prev_result.getNrErrors() : 0 ) + ", " + + ( jobEntryCopy != null ? jobEntryCopy.toString() : "null" ) + ")" ); + } + + // Which entry is next? + JobEntryInterface jobEntryInterface = jobEntryCopy.getEntry(); + jobEntryInterface.getLogChannel().setLogLevel( logLevel ); + + // Track the fact that we are going to launch the next job entry... + JobEntryResult jerBefore = + new JobEntryResult( null, null, BaseMessages.getString( PKG, "Job.Comment.JobStarted" ), reason, jobEntryCopy + .getName(), jobEntryCopy.getNr(), environmentSubstitute( jobEntryCopy.getEntry().getFilename() ) ); + jobTracker.addJobTracker( new JobTracker( jobMeta, jerBefore ) ); + + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader( jobEntryInterface.getClass().getClassLoader() ); + // Execute this entry... 
+ JobEntryInterface cloneJei = (JobEntryInterface) jobEntryInterface.clone(); + ( (VariableSpace) cloneJei ).copyVariablesFrom( this ); + cloneJei.setRepository( rep ); + if ( rep != null ) { + cloneJei.setMetaStore( rep.getMetaStore() ); + } + cloneJei.setParentJob( this ); + final long start = System.currentTimeMillis(); + + cloneJei.getLogChannel().logDetailed( "Starting job entry" ); + for ( JobEntryListener jobEntryListener : jobEntryListeners ) { + jobEntryListener.beforeExecution( this, jobEntryCopy, cloneJei ); + } + if ( interactive ) { + if ( jobEntryCopy.isTransformation() ) { + getActiveJobEntryTransformations().put( jobEntryCopy, (JobEntryTrans) cloneJei ); + } + if ( jobEntryCopy.isJob() ) { + getActiveJobEntryJobs().put( jobEntryCopy, (JobEntryJob) cloneJei ); + } + } + log.snap( Metrics.METRIC_JOBENTRY_START, cloneJei.toString() ); + newResult = cloneJei.execute( prevResult, nr ); + log.snap( Metrics.METRIC_JOBENTRY_STOP, cloneJei.toString() ); + + final long end = System.currentTimeMillis(); + if ( interactive ) { + if ( jobEntryCopy.isTransformation() ) { + getActiveJobEntryTransformations().remove( jobEntryCopy ); + } + if ( jobEntryCopy.isJob() ) { + getActiveJobEntryJobs().remove( jobEntryCopy ); + } + } + + if ( cloneJei instanceof JobEntryTrans ) { + String throughput = newResult.getReadWriteThroughput( (int) ( ( end - start ) / 1000 ) ); + if ( throughput != null ) { + log.logMinimal( throughput ); + } + } + for ( JobEntryListener jobEntryListener : jobEntryListeners ) { + jobEntryListener.afterExecution( this, jobEntryCopy, cloneJei, newResult ); + } + + Thread.currentThread().setContextClassLoader( cl ); + addErrors( (int) newResult.getNrErrors() ); + + // Also capture the logging text after the execution... + // + LoggingBuffer loggingBuffer = KettleLogStore.getAppender(); + StringBuffer logTextBuffer = loggingBuffer.getBuffer( cloneJei.getLogChannel().getLogChannelId(), false ); + newResult.setLogText( logTextBuffer.toString() + newResult.getLogText() ); + + // Save this result as well... + // + JobEntryResult jerAfter = + new JobEntryResult( newResult, cloneJei.getLogChannel().getLogChannelId(), BaseMessages.getString( PKG, + "Job.Comment.JobFinished" ), null, jobEntryCopy.getName(), jobEntryCopy.getNr(), environmentSubstitute( + jobEntryCopy.getEntry().getFilename() ) ); + jobTracker.addJobTracker( new JobTracker( jobMeta, jerAfter ) ); + synchronized ( jobEntryResults ) { + jobEntryResults.add( jerAfter ); + + // Only keep the last X job entry results in memory + // + if ( maxJobEntriesLogged > 0 ) { + while ( jobEntryResults.size() > maxJobEntriesLogged ) { + // Remove the oldest. + jobEntryResults.removeFirst(); + } + } + } + } + + extension = new JobExecutionExtension( this, prevResult, jobEntryCopy, extension.executeEntry ); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobAfterJobEntryExecution.id, extension ); + + // Try all next job entries. + // + // Keep track of all the threads we fired in case of parallel execution... + // Keep track of the results of these executions too. + // + final List threads = new ArrayList(); + // next 2 lists is being modified concurrently so must be synchronized for this case. 
+ final Queue threadResults = new ConcurrentLinkedQueue(); + final Queue threadExceptions = new ConcurrentLinkedQueue(); + final List threadEntries = new ArrayList(); + + // Launch only those where the hop indicates true or false + // + int nrNext = jobMeta.findNrNextJobEntries( jobEntryCopy ); + for ( int i = 0; i < nrNext && !isStopped(); i++ ) { + // The next entry is... + final JobEntryCopy nextEntry = jobMeta.findNextJobEntry( jobEntryCopy, i ); + + // See if we need to execute this... + final JobHopMeta hi = jobMeta.findJobHop( jobEntryCopy, nextEntry ); + + // The next comment... + final String nextComment; + if ( hi.isUnconditional() ) { + nextComment = BaseMessages.getString( PKG, "Job.Comment.FollowedUnconditional" ); + } else { + if ( newResult.getResult() ) { + nextComment = BaseMessages.getString( PKG, "Job.Comment.FollowedSuccess" ); + } else { + nextComment = BaseMessages.getString( PKG, "Job.Comment.FollowedFailure" ); + } + } + + // + // If the link is unconditional, execute the next job entry (entries). + // If the start point was an evaluation and the link color is correct: + // green or red, execute the next job entry... + // + if ( hi.isUnconditional() || ( jobEntryCopy.evaluates() && ( !( hi.getEvaluation() ^ newResult + .getResult() ) ) ) ) { + // Start this next step! + if ( log.isBasic() ) { + log.logBasic( BaseMessages.getString( PKG, "Job.Log.StartingEntry", nextEntry.getName() ) ); + } + + // Pass along the previous result, perhaps the next job can use it... + // However, set the number of errors back to 0 (if it should be reset) + // When an evaluation is executed the errors e.g. should not be reset. + if ( nextEntry.resetErrorsBeforeExecution() ) { + newResult.setNrErrors( 0 ); + } + + // Now execute! + // + // if (we launch in parallel, fire the execution off in a new thread... + // + if ( jobEntryCopy.isLaunchingInParallel() ) { + threadEntries.add( nextEntry ); + + Runnable runnable = new Runnable() { + public void run() { + try { + Result threadResult = execute( nr + 1, newResult, nextEntry, jobEntryCopy, nextComment ); + threadResults.add( threadResult ); + } catch ( Throwable e ) { + log.logError( Const.getStackTracker( e ) ); + threadExceptions.add( new KettleException( BaseMessages.getString( PKG, "Job.Log.UnexpectedError", + nextEntry.toString() ), e ) ); + Result threadResult = new Result(); + threadResult.setResult( false ); + threadResult.setNrErrors( 1L ); + threadResults.add( threadResult ); + } + } + }; + Thread thread = new Thread( runnable ); + threads.add( thread ); + thread.start(); + if ( log.isBasic() ) { + log.logBasic( BaseMessages.getString( PKG, "Job.Log.LaunchedJobEntryInParallel", nextEntry.getName() ) ); + } + } else { + try { + // Same as before: blocks until it's done + // + res = execute( nr + 1, newResult, nextEntry, jobEntryCopy, nextComment ); + } catch ( Throwable e ) { + log.logError( Const.getStackTracker( e ) ); + throw new KettleException( BaseMessages.getString( PKG, "Job.Log.UnexpectedError", nextEntry.toString() ), + e ); + } + if ( log.isBasic() ) { + log.logBasic( BaseMessages.getString( PKG, "Job.Log.FinishedJobEntry", nextEntry.getName(), res.getResult() + + "" ) ); + } + } + } + } + + // OK, if we run in parallel, we need to wait for all the job entries to + // finish... 
+ // + if ( jobEntryCopy.isLaunchingInParallel() ) { + for ( int i = 0; i < threads.size(); i++ ) { + Thread thread = threads.get( i ); + JobEntryCopy nextEntry = threadEntries.get( i ); + + try { + thread.join(); + } catch ( InterruptedException e ) { + log.logError( jobMeta.toString(), BaseMessages.getString( PKG, + "Job.Log.UnexpectedErrorWhileWaitingForJobEntry", nextEntry.getName() ) ); + threadExceptions.add( new KettleException( BaseMessages.getString( PKG, + "Job.Log.UnexpectedErrorWhileWaitingForJobEntry", nextEntry.getName() ), e ) ); + } + } + // if(log.isBasic()) log.logBasic(BaseMessages.getString(PKG, + // "Job.Log.FinishedJobEntry",startpoint.getName(),res.getResult()+"")); + } + + // Perhaps we don't have next steps?? + // In this case, return the previous result. + if ( res == null ) { + res = prevResult; + } + + // See if there where any errors in the parallel execution + // + if ( threadExceptions.size() > 0 ) { + res.setResult( false ); + res.setNrErrors( threadExceptions.size() ); + + for ( KettleException e : threadExceptions ) { + log.logError( jobMeta.toString(), e.getMessage(), e ); + } + + // Now throw the first Exception for good measure... + // + throw threadExceptions.poll(); + } + + // In parallel execution, we aggregate all the results, simply add them to + // the previous result... + // + for ( Result threadResult : threadResults ) { + res.add( threadResult ); + } + + // If there have been errors, logically, we need to set the result to + // "false"... + // + if ( res.getNrErrors() > 0 ) { + res.setResult( false ); + } + + return res; + } + + /** + * Wait until this job has finished. + */ + public void waitUntilFinished() { + waitUntilFinished( -1L ); + } + + /** + * Wait until this job has finished. + * + * @param maxMiliseconds + * the maximum number of ms to wait + */ + public void waitUntilFinished( long maxMiliseconds ) { + long time = 0L; + while ( isAlive() && ( time < maxMiliseconds || maxMiliseconds <= 0 ) ) { + try { + Thread.sleep( 1 ); + time += 1; + } catch ( InterruptedException e ) { + // Ignore sleep errors + } + } + } + + /** + * Get the number of errors that happened in the job. + * + * @return nr of error that have occurred during execution. During execution of a job the number can change. + */ + public int getErrors() { + return errors.get(); + } + + /** + * Set the number of occured errors to 0. + */ + public void resetErrors() { + errors.set( 0 ); + } + + /** + * Add a number of errors to the total number of erros that occured during execution. + * + * @param nrToAdd + * nr of errors to add. + */ + public void addErrors( int nrToAdd ) { + if ( nrToAdd > 0 ) { + errors.addAndGet( nrToAdd ); + } + } + + /** + * Handle logging at start + * + * @return true if it went OK. 
+ * + * @throws KettleException + */ + public boolean beginProcessing() throws KettleException { + currentDate = new Date(); + logDate = new Date(); + startDate = Const.MIN_DATE; + endDate = currentDate; + + resetErrors(); + + final JobLogTable jobLogTable = jobMeta.getJobLogTable(); + int intervalInSeconds = Const.toInt( environmentSubstitute( jobLogTable.getLogInterval() ), -1 ); + + if ( jobLogTable.isDefined() ) { + + DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta(); + String schemaName = environmentSubstitute( jobMeta.getJobLogTable().getActualSchemaName() ); + String tableName = environmentSubstitute( jobMeta.getJobLogTable().getActualTableName() ); + String schemaAndTable = + jobMeta.getJobLogTable().getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName ); + Database ldb = new Database( this, logcon ); + ldb.shareVariablesWith( this ); + ldb.connect(); + ldb.setCommit( logCommitSize ); + + try { + // See if we have to add a batch id... + Long id_batch = new Long( 1 ); + if ( jobMeta.getJobLogTable().isBatchIdUsed() ) { + id_batch = logcon.getNextBatchId( ldb, schemaName, tableName, jobLogTable.getKeyField().getFieldName() ); + setBatchId( id_batch.longValue() ); + if ( getPassedBatchId() <= 0 ) { + setPassedBatchId( id_batch.longValue() ); + } + } + + Object[] lastr = ldb.getLastLogDate( schemaAndTable, jobMeta.getName(), true, LogStatus.END ); + if ( !Const.isEmpty( lastr ) ) { + Date last; + try { + last = ldb.getReturnRowMeta().getDate( lastr, 0 ); + } catch ( KettleValueException e ) { + throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.ConversionError", "" + tableName ), e ); + } + if ( last != null ) { + startDate = last; + } + } + + depDate = currentDate; + + ldb.writeLogRecord( jobMeta.getJobLogTable(), LogStatus.START, this, null ); + if ( !ldb.isAutoCommit() ) { + ldb.commitLog( true, jobMeta.getJobLogTable() ); + } + ldb.disconnect(); + + // If we need to do periodic logging, make sure to install a timer for + // this... + // + if ( intervalInSeconds > 0 ) { + final Timer timer = new Timer( getName() + " - interval logging timer" ); + TimerTask timerTask = new TimerTask() { + public void run() { + try { + endProcessing(); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Job.Exception.UnableToPerformIntervalLogging" ), e ); + // Also stop the show... + // + + errors.incrementAndGet(); + stopAll(); + } + } + }; + timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 ); + + addJobListener( new JobAdapter() { + public void jobFinished( Job job ) { + timer.cancel(); + } + } ); + } + + // Add a listener at the end of the job to take of writing the final job + // log record... + // + addJobListener( new JobAdapter() { + public void jobFinished( Job job ) throws KettleException { + try { + endProcessing(); + } catch ( KettleJobException e ) { + log.logError( BaseMessages.getString( PKG, "Job.Exception.UnableToWriteToLoggingTable", jobLogTable + .toString() ), e ); + // do not skip exception here + // job is failed in case log database record is failed! 
+ throw new KettleException( e ); + } + } + } ); + + } catch ( KettleDatabaseException dbe ) { + addErrors( 1 ); // This is even before actual execution + throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.UnableToProcessLoggingStart", "" + + tableName ), dbe ); + } finally { + ldb.disconnect(); + } + } + + // If we need to write out the job entry logging information, do so at the end of the job: + // + JobEntryLogTable jobEntryLogTable = jobMeta.getJobEntryLogTable(); + if ( jobEntryLogTable.isDefined() ) { + addJobListener( new JobAdapter() { + public void jobFinished( Job job ) throws KettleException { + try { + writeJobEntryLogInformation(); + } catch ( KettleException e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Job.Exception.UnableToPerformJobEntryLoggingAtJobEnd" ), e ); + } + } + } ); + } + + // If we need to write the log channel hierarchy and lineage information, + // add a listener for that too... + // + ChannelLogTable channelLogTable = jobMeta.getChannelLogTable(); + if ( channelLogTable.isDefined() ) { + addJobListener( new JobAdapter() { + + public void jobFinished( Job job ) throws KettleException { + try { + writeLogChannelInformation(); + } catch ( KettleException e ) { + throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToPerformLoggingAtTransEnd" ), + e ); + } + } + } ); + } + + JobExecutionExtension extension = new JobExecutionExtension( this, result, null, false ); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobBeginProcessing.id, extension ); + + return true; + } + + // + // Handle logging at end + /** + * End processing. + * + * @return true, if successful + * @throws KettleJobException + * the kettle job exception + */ + private boolean endProcessing() throws KettleJobException { + LogStatus status; + if ( !isActive() ) { + if ( isStopped() ) { + status = LogStatus.STOP; + } else { + status = LogStatus.END; + } + } else { + status = LogStatus.RUNNING; + } + try { + if ( errors.get() == 0 && result != null && !result.getResult() ) { + errors.incrementAndGet(); + } + + logDate = new Date(); + + /* + * Sums errors, read, written, etc. + */ + + JobLogTable jobLogTable = jobMeta.getJobLogTable(); + if ( jobLogTable.isDefined() ) { + + writeLogTableInformation( jobLogTable, status ); + } + + return true; + } catch ( Exception e ) { + throw new KettleJobException( e ); // In case something else goes wrong. + } + } + + /** + * Writes information to Job Log table. + * Cleans old records, in case job is finished. + * + */ + protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) + throws KettleJobException, KettleDatabaseException { + boolean cleanLogRecords = status.equals( LogStatus.END ); + String tableName = jobLogTable.getActualTableName(); + DatabaseMeta logcon = jobLogTable.getDatabaseMeta(); + + Database ldb = createDataBase( logcon ); + ldb.shareVariablesWith( this ); + try { + ldb.connect(); + ldb.setCommit( logCommitSize ); + ldb.writeLogRecord( jobLogTable, status, this, null ); + + if ( cleanLogRecords ) { + ldb.cleanupLogRecords( jobLogTable ); + } + + } catch ( KettleDatabaseException dbe ) { + addErrors( 1 ); + throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe ); + } finally { + if ( !ldb.isAutoCommit() ) { + ldb.commitLog( true, jobLogTable ); + } + ldb.disconnect(); + } + } + /** + * Write log channel information. 
+ * + * @throws KettleException + * the kettle exception + */ + protected void writeLogChannelInformation() throws KettleException { + Database db = null; + ChannelLogTable channelLogTable = jobMeta.getChannelLogTable(); + + // PDI-7070: If parent job has the same channel logging info, don't duplicate log entries + Job j = getParentJob(); + + if ( j != null ) { + if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) { + return; + } + } + // end PDI-7070 + + try { + db = new Database( this, channelLogTable.getDatabaseMeta() ); + db.shareVariablesWith( this ); + db.connect(); + db.setCommit( logCommitSize ); + + List loggingHierarchyList = getLoggingHierarchy(); + for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) { + db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null ); + } + + // Also time-out the log records in here... + // + db.cleanupLogRecords( channelLogTable ); + + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e ); + } finally { + if ( !db.isAutoCommit() ) { + db.commit( true ); + } + db.disconnect(); + } + } + + /** + * Write job entry log information. + * + * @throws KettleException + * the kettle exception + */ + protected void writeJobEntryLogInformation() throws KettleException { + Database db = null; + JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable(); + try { + db = createDataBase( jobEntryLogTable.getDatabaseMeta() ); + db.shareVariablesWith( this ); + db.connect(); + db.setCommit( logCommitSize ); + + for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) { + db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this ); + } + + db.cleanupLogRecords( jobEntryLogTable ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToJobEntryInformationToLogTable" ), + e ); + } finally { + if ( !db.isAutoCommit() ) { + db.commitLog( true, jobEntryLogTable ); + } + db.disconnect(); + } + } + + protected Database createDataBase( DatabaseMeta databaseMeta ) { + return new Database( this, databaseMeta ); + } + + /** + * Checks if is active. + * + * @return true, if is active + */ + public boolean isActive() { + return active.get(); + } + + /** + * Stop all activity by setting the stopped property to true. + */ + public void stopAll() { + stopped.set( true ); + } + + /** + * Sets the stopped. + * + * @param stopped + * the new stopped + */ + public void setStopped( boolean stopped ) { + this.stopped.set( stopped ); + } + + /** + * Gets the stopped status of this Job. + * + * @return Returns the stopped status of this Job + */ + public boolean isStopped() { + return stopped.get(); + } + + /** + * Gets the start date. + * + * @return Returns the startDate + */ + public Date getStartDate() { + return startDate; + } + + /** + * Gets the end date. + * + * @return Returns the endDate + */ + public Date getEndDate() { + return endDate; + } + + /** + * Gets the current date. + * + * @return Returns the currentDate + */ + public Date getCurrentDate() { + return currentDate; + } + + /** + * Gets the dep date. + * + * @return Returns the depDate + */ + public Date getDepDate() { + return depDate; + } + + /** + * Gets the log date. + * + * @return Returns the logDate + */ + public Date getLogDate() { + return logDate; + } + + /** + * Gets the job meta. 
+ * + * @return Returns the JobMeta + */ + public JobMeta getJobMeta() { + return jobMeta; + } + + /** + * Gets the rep (repository). + * + * @return Returns the rep + */ + public Repository getRep() { + return rep; + } + + /** + * Gets the thread. + * + * @return the thread + */ + public Thread getThread() { + return this; + } + + /** + * Gets the job tracker. + * + * @return Returns the jobTracker + */ + public JobTracker getJobTracker() { + return jobTracker; + } + + /** + * Sets the job tracker. + * + * @param jobTracker + * The jobTracker to set + */ + public void setJobTracker( JobTracker jobTracker ) { + this.jobTracker = jobTracker; + } + + /** + * Sets the source rows. + * + * @param sourceRows + * the new source rows + */ + public void setSourceRows( List sourceRows ) { + this.sourceRows = sourceRows; + } + + /** + * Gets the source rows. + * + * @return the source rows + */ + public List getSourceRows() { + return sourceRows; + } + + /** + * Gets the parent job. + * + * @return Returns the parentJob + */ + public Job getParentJob() { + return parentJob; + } + + /** + * Sets the parent job. + * + * @param parentJob + * The parentJob to set. + */ + public void setParentJob( Job parentJob ) { + this.logLevel = parentJob.getLogLevel(); + this.log.setLogLevel( logLevel ); + this.containerObjectId = log.getContainerObjectId(); + this.parentJob = parentJob; + } + + /** + * Gets the result. + * + * @return the result + */ + public Result getResult() { + return result; + } + + /** + * Sets the result. + * + * @param result + * the new result + */ + public void setResult( Result result ) { + this.result = result; + } + + /** + * Gets the boolean value of initialized. + * + * @return Returns the initialized + */ + public boolean isInitialized() { + return initialized.get(); + } + + /** + * Gets the batchId. + * + * @return Returns the batchId + */ + public long getBatchId() { + return batchId; + } + + /** + * Sets the batchId. + * + * @param batchId + * The batchId to set + */ + public void setBatchId( long batchId ) { + this.batchId = batchId; + } + + /** + * Gets the passedBatchId. + * + * @return the passedBatchId + */ + public long getPassedBatchId() { + return passedBatchId; + } + + /** + * Sets the passedBatchId. + * + * @param jobBatchId + * the jobBatchId to set + */ + public void setPassedBatchId( long jobBatchId ) { + this.passedBatchId = jobBatchId; + } + + /** + * Sets the internal kettle variables. + * + * @param var + * the new internal kettle variables. + */ + public void setInternalKettleVariables( VariableSpace var ) { + if ( jobMeta != null && jobMeta.getFilename() != null ) { // we have a finename that's defined. 
+ try { + FileObject fileObject = KettleVFS.getFileObject( jobMeta.getFilename(), this ); + FileName fileName = fileObject.getName(); + + // The filename of the transformation + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, fileName.getBaseName() ); + + // The directory of the transformation + FileName fileDir = fileName.getParent(); + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, fileDir.getURI() ); + } catch ( Exception e ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); + } + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); + } + + boolean hasRepoDir = jobMeta.getRepositoryDirectory() != null && jobMeta.getRepository() != null; + + // The name of the job + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, Const.NVL( jobMeta.getName(), "" ) ); + + // The name of the directory in the repository + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, hasRepoDir ? jobMeta + .getRepositoryDirectory().getPath() : "" ); + + // setup fallbacks + if ( hasRepoDir ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) ); + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); + } + + if ( hasRepoDir ) { + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) ); + if ( "/".equals( variables.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ) ) { + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "" ); + } + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); + } + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) + */ + public void copyVariablesFrom( VariableSpace space ) { + variables.copyVariablesFrom( space ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) + */ + public String environmentSubstitute( String aString ) { + return variables.environmentSubstitute( aString ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) + */ + public String[] environmentSubstitute( String[] aString ) { + return variables.environmentSubstitute( aString ); + } + + public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) + throws KettleValueException { + return variables.fieldSubstitute( aString, rowMeta, rowData ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() + */ + public VariableSpace getParentVariableSpace() { + return variables.getParentVariableSpace(); + } + + /* + * (non-Javadoc) + * + * @see + * org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(org.pentaho.di.core.variables.VariableSpace) + */ + public void setParentVariableSpace( VariableSpace parent ) { + variables.setParentVariableSpace( parent ); + } + + /* + * 
(non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) + */ + public String getVariable( String variableName, String defaultValue ) { + return variables.getVariable( variableName, defaultValue ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) + */ + public String getVariable( String variableName ) { + return variables.getVariable( variableName ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) + */ + public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) { + if ( !Const.isEmpty( variableName ) ) { + String value = environmentSubstitute( variableName ); + if ( !Const.isEmpty( value ) ) { + return ValueMeta.convertStringToBoolean( value ); + } + } + return defaultValue; + } + + /* + * (non-Javadoc) + * + * @see + * org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(org.pentaho.di.core.variables.VariableSpace) + */ + public void initializeVariablesFrom( VariableSpace parent ) { + variables.initializeVariablesFrom( parent ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#listVariables() + */ + public String[] listVariables() { + return variables.listVariables(); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) + */ + public void setVariable( String variableName, String variableValue ) { + variables.setVariable( variableName, variableValue ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) + */ + public void shareVariablesWith( VariableSpace space ) { + variables = space; + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) + */ + public void injectVariables( Map prop ) { + variables.injectVariables( prop ); + } + + /** + * Gets the status. + * + * @return the status + */ + public String getStatus() { + String message; + + if ( !initialized.get() ) { + message = Trans.STRING_WAITING; + } else { + if ( active.get() ) { + if ( stopped.get() ) { + message = Trans.STRING_HALTING; + } else { + message = Trans.STRING_RUNNING; + } + } else { + if ( stopped.get() ) { + message = Trans.STRING_STOPPED; + } else { + message = Trans.STRING_FINISHED; + } + if ( result != null && result.getNrErrors() > 0 ) { + message += " (with errors)"; + } + } + } + + return message; + } + + /** + * Send to slave server. 
+ * + * @param jobMeta + * the job meta + * @param executionConfiguration + * the execution configuration + * @param repository + * the repository + * @param metaStore + * the metaStore + * @return the string + * @throws KettleException + * the kettle exception + */ + public static String sendToSlaveServer( JobMeta jobMeta, JobExecutionConfiguration executionConfiguration, + Repository repository, IMetaStore metaStore ) throws KettleException { + String carteObjectId; + SlaveServer slaveServer = executionConfiguration.getRemoteServer(); + + if ( slaveServer == null ) { + throw new KettleException( BaseMessages.getString( PKG, "Job.Log.NoSlaveServerSpecified" ) ); + } + if ( Const.isEmpty( jobMeta.getName() ) ) { + throw new KettleException( BaseMessages.getString( PKG, "Job.Log.UniqueJobName" ) ); + } + + // Align logging levels between execution configuration and remote server + slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() ); + + try { + // Inject certain internal variables to make it more intuitive. + // + for ( String var : Const.INTERNAL_TRANS_VARIABLES ) { + executionConfiguration.getVariables().put( var, jobMeta.getVariable( var ) ); + } + for ( String var : Const.INTERNAL_JOB_VARIABLES ) { + executionConfiguration.getVariables().put( var, jobMeta.getVariable( var ) ); + } + + if ( executionConfiguration.isPassingExport() ) { + // First export the job... slaveServer.getVariable("MASTER_HOST") + // + FileObject tempFile = + KettleVFS.createTempFile( "jobExport", ".zip", System.getProperty( "java.io.tmpdir" ), jobMeta ); + + TopLevelResource topLevelResource = + ResourceUtil.serializeResourceExportInterface( tempFile.getName().toString(), jobMeta, jobMeta, repository, + metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME ); + + // Send the zip file over to the slave server... 
+ // + String result = + slaveServer.sendExport( topLevelResource.getArchiveName(), AddExportServlet.TYPE_JOB, topLevelResource + .getBaseResourceName() ); + WebResult webResult = WebResult.fromXMLString( result ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error passing the exported job to the remote server: " + Const.CR + + webResult.getMessage() ); + } + carteObjectId = webResult.getId(); + } else { + String xml = new JobConfiguration( jobMeta, executionConfiguration ).getXML(); + + String reply = slaveServer.sendXML( xml, RegisterJobServlet.CONTEXT_PATH + "/?xml=Y" ); + WebResult webResult = WebResult.fromXMLString( reply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error posting the job on the remote server: " + Const.CR + webResult + .getMessage() ); + } + carteObjectId = webResult.getId(); + } + + // Start the job + // + String reply = + slaveServer.execService( StartJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( jobMeta.getName(), + "UTF-8" ) + "&xml=Y&id=" + carteObjectId ); + WebResult webResult = WebResult.fromXMLString( reply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error starting the job on the remote server: " + Const.CR + webResult + .getMessage() ); + } + return carteObjectId; + } catch ( KettleException ke ) { + throw ke; + } catch ( Exception e ) { + throw new KettleException( e ); + } + } + + /** + * Add a job listener to the job + * + * @param jobListener + * the job listener to add + */ + public void addJobListener( JobListener jobListener ) { + synchronized ( jobListeners ) { + jobListeners.add( jobListener ); + } + } + + /** + * Adds the job entry listener. + * + * @param jobEntryListener + * the job entry listener + */ + public void addJobEntryListener( JobEntryListener jobEntryListener ) { + jobEntryListeners.add( jobEntryListener ); + } + + /** + * Remove a job listener from the job + * + * @param jobListener + * the job listener to remove + */ + public void removeJobListener( JobListener jobListener ) { + synchronized ( jobListeners ) { + jobListeners.remove( jobListener ); + } + } + + /** + * Remove a job entry listener from the job + * + * @param jobListener + * the job entry listener to remove + */ + public void removeJobEntryListener( JobEntryListener jobEntryListener ) { + jobEntryListeners.remove( jobEntryListener ); + } + + /** + * Gets the job entry listeners. + * + * @return the job entry listeners + */ + public List getJobEntryListeners() { + return jobEntryListeners; + } + + /** + * Gets the job listeners. + * + * @return the job listeners + */ + public List getJobListeners() { + synchronized ( jobListeners ) { + return new ArrayList( jobListeners ); + } + } + + /** + * Gets the boolean value of finished. + * + * @return the finished + */ + public boolean isFinished() { + return finished.get(); + } + + /** + * Sets the value of finished. 
+ * + * @param finished + * the finished to set + */ + public void setFinished( boolean finished ) { + this.finished.set( finished ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, + * java.lang.String) + */ + public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException { + namedParams.addParameterDefinition( key, defValue, description ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) + */ + public String getParameterDescription( String key ) throws UnknownParamException { + return namedParams.getParameterDescription( key ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) + */ + public String getParameterDefault( String key ) throws UnknownParamException { + return namedParams.getParameterDefault( key ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) + */ + public String getParameterValue( String key ) throws UnknownParamException { + return namedParams.getParameterValue( key ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#listParameters() + */ + public String[] listParameters() { + return namedParams.listParameters(); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) + */ + public void setParameterValue( String key, String value ) throws UnknownParamException { + namedParams.setParameterValue( key, value ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() + */ + public void eraseParameters() { + namedParams.eraseParameters(); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() + */ + public void clearParameters() { + namedParams.clearParameters(); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() + */ + public void activateParameters() { + String[] keys = listParameters(); + + for ( String key : keys ) { + String value; + try { + value = getParameterValue( key ); + } catch ( UnknownParamException e ) { + value = ""; + } + String defValue; + try { + defValue = getParameterDefault( key ); + } catch ( UnknownParamException e ) { + defValue = ""; + } + + if ( Const.isEmpty( value ) ) { + setVariable( key, Const.NVL( defValue, "" ) ); + } else { + setVariable( key, Const.NVL( value, "" ) ); + } + } + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) + */ + public void copyParametersFrom( NamedParams params ) { + namedParams.copyParametersFrom( params ); + } + + /** + * Sets the socket repository. + * + * @param socketRepository + * the new socket repository + */ + public void setSocketRepository( SocketRepository socketRepository ) { + this.socketRepository = socketRepository; + } + + /** + * Gets the socket repository. + * + * @return the socket repository + */ + public SocketRepository getSocketRepository() { + return socketRepository; + } + + /** + * Gets the log channel interface. + * + * @return LogChannelInterface + */ + public LogChannelInterface getLogChannel() { + return log; + } + + /** + * Gets the job name. 
+ * + * @return jobName + */ + public String getObjectName() { + return getJobname(); + } + + /** + * Always returns null for Job. + * + * @return null + */ + public String getObjectCopy() { + return null; + } + + /** + * Gets the file name. + * + * @return the filename + */ + public String getFilename() { + if ( jobMeta == null ) { + return null; + } + return jobMeta.getFilename(); + } + + /** + * Gets the log channel id. + * + * @return the logChannelId + */ + public String getLogChannelId() { + return log.getLogChannelId(); + } + + /** + * Gets the jobMeta's object id. + * + * @return ObjectId + */ + public ObjectId getObjectId() { + if ( jobMeta == null ) { + return null; + } + return jobMeta.getObjectId(); + } + + /** + * Gets the job meta's object revision. + * + * @return ObjectRevision + */ + public ObjectRevision getObjectRevision() { + if ( jobMeta == null ) { + return null; + } + return jobMeta.getObjectRevision(); + } + + /** + * Gets LoggingObjectType.JOB, which is always the value for Job. + * + * @return LoggingObjectType LoggingObjectType.JOB + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.JOB; + } + + /** + * Gets parent logging object. + * + * @return parentLoggingObject + */ + public LoggingObjectInterface getParent() { + return parentLoggingObject; + } + + /** + * Gets the job meta's repository directory interface. + * + * @return RepositoryDirectoryInterface + */ + public RepositoryDirectoryInterface getRepositoryDirectory() { + if ( jobMeta == null ) { + return null; + } + return jobMeta.getRepositoryDirectory(); + } + + /** + * Gets the logLevel. + * + * @return logLevel + */ + public LogLevel getLogLevel() { + return logLevel; + } + + /** + * Sets the log level. + * + * @param logLevel + * the new log level + */ + public void setLogLevel( LogLevel logLevel ) { + this.logLevel = logLevel; + log.setLogLevel( logLevel ); + } + + /** + * Gets the logging hierarchy. + * + * @return the logging hierarchy + */ + public List getLoggingHierarchy() { + List hierarchy = new ArrayList(); + List childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() ); + for ( String childId : childIds ) { + LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId ); + if ( loggingObject != null ) { + hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) ); + } + } + + return hierarchy; + } + + /** + * Gets the boolean value of interactive. + * + * @return the interactive + */ + public boolean isInteractive() { + return interactive; + } + + /** + * Sets the value of interactive. + * + * @param interactive + * the interactive to set + */ + public void setInteractive( boolean interactive ) { + this.interactive = interactive; + } + + /** + * Gets the activeJobEntryTransformations. + * + * @return the activeJobEntryTransformations + */ + public Map getActiveJobEntryTransformations() { + return activeJobEntryTransformations; + } + + /** + * Gets the activeJobEntryJobs. + * + * @return the activeJobEntryJobs + */ + public Map getActiveJobEntryJobs() { + return activeJobEntryJobs; + } + + /** + * Gets a flat list of results in THIS job, in the order of execution of job entries. + * + * @return A flat list of results in THIS job, in the order of execution of job entries + */ + public List getJobEntryResults() { + synchronized ( jobEntryResults ) { + return new ArrayList( jobEntryResults ); + } + } + + /** + * Gets the carteObjectId. 
+ * + * @return the carteObjectId + */ + public String getContainerObjectId() { + return containerObjectId; + } + + /** + * Sets the execution container object id (containerObjectId). + * + * @param containerObjectId + * the execution container object id to set + */ + public void setContainerObjectId( String containerObjectId ) { + this.containerObjectId = containerObjectId; + } + + /** + * Gets the parent logging object. + * + * @return the parent logging object + */ + public LoggingObjectInterface getParentLoggingObject() { + return parentLoggingObject; + } + + /** + * Gets the registration date. For job, this always returns null + * + * @return null + */ + public Date getRegistrationDate() { + return null; + } + + /** + * Gets the start job entry copy. + * + * @return the startJobEntryCopy + */ + public JobEntryCopy getStartJobEntryCopy() { + return startJobEntryCopy; + } + + /** + * Sets the start job entry copy. + * + * @param startJobEntryCopy + * the startJobEntryCopy to set + */ + public void setStartJobEntryCopy( JobEntryCopy startJobEntryCopy ) { + this.startJobEntryCopy = startJobEntryCopy; + } + + /** + * Gets the executing server. + * + * @return the executingServer + */ + public String getExecutingServer() { + return executingServer; + } + + /** + * Sets the executing server. + * + * @param executingServer + * the executingServer to set + */ + public void setExecutingServer( String executingServer ) { + this.executingServer = executingServer; + } + + /** + * Gets the executing user. + * + * @return the executingUser + */ + public String getExecutingUser() { + return executingUser; + } + + /** + * Sets the executing user. + * + * @param executingUser + * the executingUser to set + */ + public void setExecutingUser( String executingUser ) { + this.executingUser = executingUser; + } + + @Override + public boolean isGatheringMetrics() { + return log != null && log.isGatheringMetrics(); + } + + @Override + public void setGatheringMetrics( boolean gatheringMetrics ) { + if ( log != null ) { + log.setGatheringMetrics( gatheringMetrics ); + } + } + + @Override + public boolean isForcingSeparateLogging() { + return log != null && log.isForcingSeparateLogging(); + } + + @Override + public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { + if ( log != null ) { + log.setForcingSeparateLogging( forcingSeparateLogging ); + } + } + + /** + * Gets the transaction id. + * + * @return the transactionId + */ + public String getTransactionId() { + return transactionId; + } + + /** + * Sets the transaction id. 
+ * + * @param transactionId + * the transactionId to set + */ + public void setTransactionId( String transactionId ) { + this.transactionId = transactionId; + } + + public List getDelegationListeners() { + return delegationListeners; + } + + public void setDelegationListeners( List delegationListeners ) { + this.delegationListeners = delegationListeners; + } + + public void addDelegationListener( DelegationListener delegationListener ) { + delegationListeners.add( delegationListener ); + } + + public String[] getArguments() { + return arguments; + } + + public void setArguments( String[] arguments ) { + this.arguments = arguments; + } + + public Trans getParentTrans() { + return parentTrans; + } + + public void setParentTrans( Trans parentTrans ) { + this.parentTrans = parentTrans; + } + + public Map getExtensionDataMap() { + return extensionDataMap; + } + + public Result getStartJobEntryResult() { + return startJobEntryResult; + } + + public void setStartJobEntryResult( Result startJobEntryResult ) { + this.startJobEntryResult = startJobEntryResult; + } + + protected ExecutorService startHeartbeat( final long intervalInSeconds ) { + + final ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor( new ThreadFactory() { + + @Override + public Thread newThread( Runnable r ) { + Thread thread = new Thread( r, "Job Heartbeat Thread for: " + getName() ); + thread.setDaemon( true ); + return thread; + } + } ); + + heartbeat.scheduleAtFixedRate( new Runnable() { + public void run() { + + if ( Job.this.isFinished() ) { + log.logBasic( "Shutting down heartbeat signal for " + jobMeta.getName() ); + shutdownHeartbeat( heartbeat ); + return; + } + + try { + + log.logDebug( "Triggering heartbeat signal for " + jobMeta.getName() + " at every " + intervalInSeconds + + " seconds" ); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobHeartbeat.id, Job.this ); + + } catch ( KettleException e ) { + log.logError( e.getMessage(), e ); + } + } + }, intervalInSeconds /* initial delay */, intervalInSeconds /* interval delay */, TimeUnit.SECONDS ); + + return heartbeat; + } + + protected void shutdownHeartbeat( ExecutorService heartbeat ) { + + if ( heartbeat != null ) { + + try { + heartbeat.shutdownNow(); // prevents waiting tasks from starting and attempts to stop currently executing ones + + } catch ( Throwable t ) { + /* do nothing */ + } + } + } + + private int getHeartbeatIntervalInSeconds() { + + JobMeta meta = this.jobMeta; + + // 1 - check if there's a user defined value ( job-specific ) heartbeat periodic interval; + // 2 - check if there's a default defined value ( job-specific ) heartbeat periodic interval; + // 3 - use default Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS if none of the above have been set + + try { + + if ( meta != null ) { + + return Const.toInt( meta.getParameterValue( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), Const.toInt( meta + .getParameterDefault( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), + Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS ) ); + } + + } catch ( Exception e ) { + /* do nothing, return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS */ + } + + return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java new file mode 100644 index 0000000..5c16d7b --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java @@ -0,0 +1,2853 @@ +// CHECKSTYLE:FileLength:OFF +/*! 
****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.job; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.vfs2.FileName; +import org.apache.commons.vfs2.FileObject; +import org.apache.commons.vfs2.FileSystemException; +import org.pentaho.di.base.AbstractMeta; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.LastUsedFile; +import org.pentaho.di.core.NotePadMeta; +import org.pentaho.di.core.ProgressMonitorListener; +import org.pentaho.di.core.Props; +import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.attributes.AttributesUtil; +import org.pentaho.di.core.database.Database; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.IdNotFoundException; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleFileException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.exception.LookupReferencesException; +import org.pentaho.di.core.extension.ExtensionPointHandler; +import org.pentaho.di.core.extension.KettleExtensionPoint; +import org.pentaho.di.core.gui.OverwritePrompter; +import org.pentaho.di.core.gui.Point; +import org.pentaho.di.core.logging.ChannelLogTable; +import org.pentaho.di.core.logging.JobEntryLogTable; +import org.pentaho.di.core.logging.JobLogTable; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogStatus; +import org.pentaho.di.core.logging.LogTableInterface; +import org.pentaho.di.core.logging.LogTablePluginInterface; +import org.pentaho.di.core.logging.LogTablePluginInterface.TableType; +import org.pentaho.di.core.logging.LogTablePluginType; +import org.pentaho.di.core.logging.LoggingObjectInterface; +import org.pentaho.di.core.logging.LoggingObjectType; +import org.pentaho.di.core.parameters.NamedParamsDefault; +import org.pentaho.di.core.parameters.UnknownParamException; +import org.pentaho.di.core.plugins.PluginInterface; +import org.pentaho.di.core.plugins.PluginRegistry; +import org.pentaho.di.core.reflection.StringSearchResult; +import org.pentaho.di.core.reflection.StringSearcher; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.util.StringUtil; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.core.xml.XMLInterface; +import org.pentaho.di.i18n.BaseMessages; +import 
org.pentaho.di.job.entries.missing.MissingEntry; +import org.pentaho.di.job.entries.special.JobEntrySpecial; +import org.pentaho.di.job.entry.JobEntryCopy; +import org.pentaho.di.job.entry.JobEntryInterface; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.repository.RepositoryDirectory; +import org.pentaho.di.repository.RepositoryElementInterface; +import org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.resource.ResourceDefinition; +import org.pentaho.di.resource.ResourceExportInterface; +import org.pentaho.di.resource.ResourceNamingInterface; +import org.pentaho.di.resource.ResourceReference; +import org.pentaho.di.shared.SharedObjectInterface; +import org.pentaho.di.shared.SharedObjects; +import org.pentaho.metastore.api.IMetaStore; +import org.pentaho.metastore.api.exceptions.MetaStoreException; +import org.w3c.dom.Document; +import org.w3c.dom.Node; + +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * The definition of a PDI job is represented by a JobMeta object. It is typically loaded from a .kjb file, a PDI + * repository, or it is generated dynamically. The declared parameters of the job definition are then queried using + * listParameters() and assigned values using calls to setParameterValue(..). JobMeta provides methods to load, save, + * verify, etc. + * + * @author Matt + * @since 11-08-2003 + */ +public class JobMeta extends AbstractMeta + implements Cloneable, Comparable, XMLInterface, ResourceExportInterface, RepositoryElementInterface, + LoggingObjectInterface { + + private static Class PKG = JobMeta.class; // for i18n purposes, needed by Translator2!! + + public static final String XML_TAG = "job"; + + protected static final String XML_TAG_SLAVESERVERS = "slaveservers"; + + /** + * A constant specifying the repository element type as a Job. + */ + public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.JOB; + + protected String jobVersion; + + protected int jobStatus; + + protected List jobcopies; + + protected List jobhops; + + protected String[] arguments; + + protected boolean changedEntries, changedHops; + + protected JobLogTable jobLogTable; + + protected JobEntryLogTable jobEntryLogTable; + + protected List extraLogTables; + + /** + * Constant = "SPECIAL" + **/ + public static final String STRING_SPECIAL = "SPECIAL"; + + /** + * Constant = "START" + **/ + public static final String STRING_SPECIAL_START = "START"; + + /** + * Constant = "DUMMY" + **/ + public static final String STRING_SPECIAL_DUMMY = "DUMMY"; + + /** + * Constant = "OK" + **/ + public static final String STRING_SPECIAL_OK = "OK"; + + /** + * Constant = "ERROR" + **/ + public static final String STRING_SPECIAL_ERROR = "ERROR"; + + /** + * The loop cache. + */ + protected Map loopCache; + + /** + * List of booleans indicating whether or not to remember the size and position of the different windows... + */ + public boolean[] max = new boolean[1]; + + protected boolean batchIdPassed; + + protected static final String XML_TAG_PARAMETERS = "parameters"; + + private List missingEntries; + + /** + * Instantiates a new job meta. + */ + public JobMeta() { + clear(); + initializeVariablesFrom( null ); + } + + /** + * Clears or reinitializes many of the JobMeta properties. 
+ */ + @Override + public void clear() { + jobcopies = new ArrayList(); + jobhops = new ArrayList(); + + jobLogTable = JobLogTable.getDefault( this, this ); + jobEntryLogTable = JobEntryLogTable.getDefault( this, this ); + extraLogTables = new ArrayList(); + + List plugins = PluginRegistry.getInstance().getPlugins( LogTablePluginType.class ); + for ( PluginInterface plugin : plugins ) { + try { + LogTablePluginInterface logTablePluginInterface = (LogTablePluginInterface) PluginRegistry.getInstance() + .loadClass( plugin ); + if ( logTablePluginInterface.getType() == TableType.JOB ) { + logTablePluginInterface.setContext( this, this ); + extraLogTables.add( logTablePluginInterface ); + } + } catch ( Exception e ) { + LogChannel.GENERAL.logError( "Error loading log table plugin with ID " + plugin.getIds()[0], e ); + } + } + + arguments = null; + + super.clear(); + loopCache = new HashMap(); + addDefaults(); + jobStatus = -1; + jobVersion = null; + + // setInternalKettleVariables(); Don't clear the internal variables for + // ad-hoc jobs, it's ruins the previews + // etc. + } + + /** + * Adds the defaults. + */ + public void addDefaults() { + /* + * addStart(); // Add starting point! addDummy(); // Add dummy! addOK(); // errors == 0 evaluation addError(); // + * errors != 0 evaluation + */ + + clearChanged(); + } + + /** + * Creates the start entry. + * + * @return the job entry copy + */ + public static final JobEntryCopy createStartEntry() { + JobEntrySpecial jobEntrySpecial = new JobEntrySpecial( STRING_SPECIAL_START, true, false ); + JobEntryCopy jobEntry = new JobEntryCopy(); + jobEntry.setObjectId( null ); + jobEntry.setEntry( jobEntrySpecial ); + jobEntry.setLocation( 50, 50 ); + jobEntry.setDrawn( false ); + jobEntry.setDescription( BaseMessages.getString( PKG, "JobMeta.StartJobEntry.Description" ) ); + return jobEntry; + + } + + /** + * Creates the dummy entry. + * + * @return the job entry copy + */ + public static final JobEntryCopy createDummyEntry() { + JobEntrySpecial jobEntrySpecial = new JobEntrySpecial( STRING_SPECIAL_DUMMY, false, true ); + JobEntryCopy jobEntry = new JobEntryCopy(); + jobEntry.setObjectId( null ); + jobEntry.setEntry( jobEntrySpecial ); + jobEntry.setLocation( 50, 50 ); + jobEntry.setDrawn( false ); + jobEntry.setDescription( BaseMessages.getString( PKG, "JobMeta.DummyJobEntry.Description" ) ); + return jobEntry; + } + + /** + * Gets the start. + * + * @return the start + */ + public JobEntryCopy getStart() { + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy cge = getJobEntry( i ); + if ( cge.isStart() ) { + return cge; + } + } + return null; + } + + /** + * Gets the dummy. 
+ * + * @return the dummy + */ + public JobEntryCopy getDummy() { + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy cge = getJobEntry( i ); + if ( cge.isDummy() ) { + return cge; + } + } + return null; + } + + /** + * Compares two transformation on name, filename + */ + public int compare( JobMeta j1, JobMeta j2 ) { + if ( Const.isEmpty( j1.getName() ) && !Const.isEmpty( j2.getName() ) ) { + return -1; + } + if ( !Const.isEmpty( j1.getName() ) && Const.isEmpty( j2.getName() ) ) { + return 1; + } + if ( Const.isEmpty( j1.getName() ) && Const.isEmpty( j2.getName() ) || j1.getName().equals( j2.getName() ) ) { + if ( Const.isEmpty( j1.getFilename() ) && !Const.isEmpty( j2.getFilename() ) ) { + return -1; + } + if ( !Const.isEmpty( j1.getFilename() ) && Const.isEmpty( j2.getFilename() ) ) { + return 1; + } + if ( Const.isEmpty( j1.getFilename() ) && Const.isEmpty( j2.getFilename() ) ) { + return 0; + } + return j1.getFilename().compareTo( j2.getFilename() ); + } + + // Compare by name : repositories etc. + // + if ( j1.getObjectRevision() != null && j2.getObjectRevision() == null ) { + return 1; + } + if ( j1.getObjectRevision() == null && j2.getObjectRevision() != null ) { + return -1; + } + int cmp; + if ( j1.getObjectRevision() == null && j2.getObjectRevision() == null ) { + cmp = 0; + } else { + cmp = j1.getObjectRevision().getName().compareTo( j2.getObjectRevision().getName() ); + } + if ( cmp == 0 ) { + return j1.getName().compareTo( j2.getName() ); + } else { + return cmp; + } + } + + /** + * Compares this job's meta-data to the specified job's meta-data. This method simply calls compare(this, o) + * + * @param o the o + * @return the int + * @see #compare(JobMeta, JobMeta) + * @see java.lang.Comparable#compareTo(java.lang.Object) + */ + public int compareTo( JobMeta o ) { + return compare( this, o ); + } + + /** + * Checks whether this job's meta-data object is equal to the specified object. If the specified object is not an + * instance of JobMeta, false is returned. Otherwise the method returns whether a call to compare() indicates equality + * (i.e. compare(this, (JobMeta)obj)==0). + * + * @param obj the obj + * @return true, if successful + * @see #compare(JobMeta, JobMeta) + * @see java.lang.Object#equals(java.lang.Object) + */ + public boolean equals( Object obj ) { + if ( !( obj instanceof JobMeta ) ) { + return false; + } + + return compare( this, (JobMeta) obj ) == 0; + } + + /** + * Clones the job meta-data object. + * + * @return a clone of the job meta-data object + * @see java.lang.Object#clone() + */ + public Object clone() { + return realClone( true ); + } + + /** + * Perform a real clone of the job meta-data object, including cloning all lists and copying all values. If the + * doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied + * fields will be cleared. 
+ * + * @param doClear Whether to clear all of the clone's data before copying from the source object + * @return a real clone of the calling object + */ + public Object realClone( boolean doClear ) { + try { + JobMeta jobMeta = (JobMeta) super.clone(); + if ( doClear ) { + jobMeta.clear(); + } else { + jobMeta.jobcopies = new ArrayList(); + jobMeta.jobhops = new ArrayList(); + jobMeta.notes = new ArrayList(); + jobMeta.databases = new ArrayList(); + jobMeta.slaveServers = new ArrayList(); + jobMeta.namedParams = new NamedParamsDefault(); + } + + for ( JobEntryCopy entry : jobcopies ) { + jobMeta.jobcopies.add( (JobEntryCopy) entry.clone_deep() ); + } + for ( JobHopMeta entry : jobhops ) { + jobMeta.jobhops.add( (JobHopMeta) entry.clone() ); + } + for ( NotePadMeta entry : notes ) { + jobMeta.notes.add( (NotePadMeta) entry.clone() ); + } + for ( DatabaseMeta entry : databases ) { + jobMeta.databases.add( (DatabaseMeta) entry.clone() ); + } + for ( SlaveServer slave : slaveServers ) { + jobMeta.getSlaveServers().add( (SlaveServer) slave.clone() ); + } + for ( String key : listParameters() ) { + jobMeta.addParameterDefinition( key, getParameterDefault( key ), getParameterDescription( key ) ); + } + return jobMeta; + } catch ( Exception e ) { + return null; + } + } + + /** + * Gets the job log table. + * + * @return the job log table + */ + public JobLogTable getJobLogTable() { + return jobLogTable; + } + + /** + * Sets the job log table. + * + * @param jobLogTable the new job log table + */ + public void setJobLogTable( JobLogTable jobLogTable ) { + this.jobLogTable = jobLogTable; + } + + /** + * Clears the different changed flags of the job. + */ + @Override + public void clearChanged() { + changedEntries = false; + changedHops = false; + + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy entry = getJobEntry( i ); + entry.setChanged( false ); + } + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + hi.setChanged( false ); + } + super.clearChanged(); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.changed.ChangedFlag#hasChanged() + */ + @Override + public boolean hasChanged() { + if ( super.hasChanged() ) { + return true; + } + + if ( haveJobEntriesChanged() ) { + return true; + } + if ( haveJobHopsChanged() ) { + return true; + } + + return false; + } + + private Set getUsedDatabaseMetas() { + Set databaseMetas = new HashSet(); + for ( JobEntryCopy jobEntryCopy : getJobCopies() ) { + DatabaseMeta[] dbs = jobEntryCopy.getEntry().getUsedDatabaseConnections(); + if ( dbs != null ) { + for ( DatabaseMeta db : dbs ) { + databaseMetas.add( db ); + } + } + } + + databaseMetas.add( jobLogTable.getDatabaseMeta() ); + + for ( LogTableInterface logTable : getExtraLogTables() ) { + databaseMetas.add( logTable.getDatabaseMeta() ); + } + return databaseMetas; + } + + /** + * This method asks all steps in the transformation whether or not the specified database connection is used. The + * connection is used in the transformation if any of the steps uses it or if it is being used to log to. + * + * @param databaseMeta The connection to check + * @return true if the connection is used in this transformation. + */ + public boolean isDatabaseConnectionUsed( DatabaseMeta databaseMeta ) { + return getUsedDatabaseMetas().contains( databaseMeta ); + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.EngineMetaInterface#getFileType() + */ + public String getFileType() { + return LastUsedFile.FILE_TYPE_JOB; + } + + /** + * Gets the job filter names. 
+ * + * @return the filter names + * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames() + */ + public String[] getFilterNames() { + return Const.getJobFilterNames(); + } + + /** + * Gets the job filter extensions. For JobMeta, this method returns the value of {@link Const.STRING_JOB_FILTER_EXT} + * + * @return the filter extensions + * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions() + */ + public String[] getFilterExtensions() { + return Const.STRING_JOB_FILTER_EXT; + } + + /** + * Gets the default extension for a job. For JobMeta, this method returns the value of + * {@link Const#STRING_JOB_DEFAULT_EXT} + * + * @return the default extension + * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension() + */ + public String getDefaultExtension() { + return Const.STRING_JOB_DEFAULT_EXT; + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.xml.XMLInterface#getXML() + */ + public String getXML() { + Props props = null; + if ( Props.isInitialized() ) { + props = Props.getInstance(); + } + + StringBuffer retval = new StringBuffer( 500 ); + + retval.append( "<" ).append( XML_TAG ).append( ">" ).append( Const.CR ); + + retval.append( " " ).append( XMLHandler.addTagValue( "name", getName() ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "description", description ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "extended_description", extendedDescription ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "job_version", jobVersion ) ); + if ( jobStatus >= 0 ) { + retval.append( " " ).append( XMLHandler.addTagValue( "job_status", jobStatus ) ); + } + + retval.append( " " ).append( XMLHandler.addTagValue( "directory", + ( directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR ) ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "created_user", createdUser ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "created_date", XMLHandler.date2string( createdDate ) ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "modified_user", modifiedUser ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "modified_date", XMLHandler.date2string( modifiedDate ) ) ); + + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); + String[] parameters = listParameters(); + for ( int idx = 0; idx < parameters.length; idx++ ) { + retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ).append( Const.CR ); + retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[idx] ) ); + try { + retval.append( " " ) + .append( XMLHandler.addTagValue( "default_value", getParameterDefault( parameters[idx] ) ) ); + retval.append( " " ) + .append( XMLHandler.addTagValue( "description", getParameterDescription( parameters[idx] ) ) ); + } catch ( UnknownParamException e ) { + // skip the default value and/or description. This exception should never happen because we use listParameters() + // above. + } + retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ).append( Const.CR ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); + + Set usedDatabaseMetas = getUsedDatabaseMetas(); + // Save the database connections... 
+ for ( int i = 0; i < nrDatabases(); i++ ) { + DatabaseMeta dbMeta = getDatabase( i ); + if ( props != null && props.areOnlyUsedConnectionsSavedToXML() ) { + if ( usedDatabaseMetas.contains( dbMeta ) ) { + retval.append( dbMeta.getXML() ); + } + } else { + retval.append( dbMeta.getXML() ); + } + } + + // The slave servers... + // + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); + for ( int i = 0; i < slaveServers.size(); i++ ) { + SlaveServer slaveServer = slaveServers.get( i ); + retval.append( " " ).append( slaveServer.getXML() ).append( Const.CR ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); + + // Append the job logging information... + // + for ( LogTableInterface logTable : getLogTables() ) { + retval.append( logTable.getXML() ); + } + + retval.append( " " ).append( XMLHandler.addTagValue( "pass_batchid", batchIdPassed ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "shared_objects_file", sharedObjectsFile ) ); + + retval.append( " " ).append( Const.CR ); + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy jge = getJobEntry( i ); + jge.getEntry().setRepository( repository ); + retval.append( jge.getXML() ); + } + retval.append( " " ).append( Const.CR ); + + retval.append( " " ).append( Const.CR ); + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + retval.append( hi.getXML() ); + } + retval.append( " " ).append( Const.CR ); + + retval.append( " " ).append( Const.CR ); + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + retval.append( ni.getXML() ); + } + retval.append( " " ).append( Const.CR ); + + // Also store the attribute groups + // + retval.append( AttributesUtil.getAttributesXml( attributesMap ) ).append( Const.CR ); + + retval.append( "" ).append( Const.CR ); + + return retval.toString(); + } + + /** + * Instantiates a new job meta. + * + * @param fname the fname + * @param rep the rep + * @throws KettleXMLException the kettle xml exception + */ + public JobMeta( String fname, Repository rep ) throws KettleXMLException { + this( null, fname, rep, null ); + } + + /** + * Instantiates a new job meta. + * + * @param fname the fname + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public JobMeta( String fname, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { + this( null, fname, rep, prompter ); + } + + /** + * Load the job from the XML file specified. + * + * @param fname The filename to load as a job + * @param rep The repository to bind againt, null if there is no repository available. + * @throws KettleXMLException + */ + @Deprecated + public JobMeta( VariableSpace parentSpace, String fname, Repository rep, OverwritePrompter prompter ) + throws KettleXMLException { + this( parentSpace, fname, rep, null, prompter ); + } + + /** + * Load the job from the XML file specified. + * + * @param fname The filename to load as a job + * @param rep The repository to bind againt, null if there is no repository available. + * @throws KettleXMLException + */ + public JobMeta( VariableSpace parentSpace, String fname, Repository rep, IMetaStore metaStore, + OverwritePrompter prompter ) throws KettleXMLException { + this.initializeVariablesFrom( parentSpace ); + this.metaStore = metaStore; + try { + // OK, try to load using the VFS stuff... 
+ Document doc = XMLHandler.loadXMLFile( KettleVFS.getFileObject( fname, this ) ); + if ( doc != null ) { + // The jobnode + Node jobnode = XMLHandler.getSubNode( doc, XML_TAG ); + + loadXML( jobnode, fname, rep, metaStore, false, prompter ); + } else { + throw new KettleXMLException( + BaseMessages.getString( PKG, "JobMeta.Exception.ErrorReadingFromXMLFile" ) + fname ); + } + } catch ( Exception e ) { + throw new KettleXMLException( + BaseMessages.getString( PKG, "JobMeta.Exception.UnableToLoadJobFromXMLFile" ) + fname + "]", e ); + } + } + + /** + * Instantiates a new job meta. + * + * @param inputStream the input stream + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public JobMeta( InputStream inputStream, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { + this(); + Document doc = XMLHandler.loadXMLFile( inputStream, null, false, false ); + loadXML( XMLHandler.getSubNode( doc, JobMeta.XML_TAG ), rep, prompter ); + } + + /** + * Create a new JobMeta object by loading it from a a DOM node. + * + * @param jobnode The node to load from + * @param rep The reference to a repository to load additional information from + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public JobMeta( Node jobnode, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { + this(); + loadXML( jobnode, rep, false, prompter ); + } + + /** + * Create a new JobMeta object by loading it from a a DOM node. + * + * @param jobnode The node to load from + * @param rep The reference to a repository to load additional information from + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public JobMeta( Node jobnode, Repository rep, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter ) + throws KettleXMLException { + this(); + loadXML( jobnode, rep, ignoreRepositorySharedObjects, prompter ); + } + + /** + * Checks if is rep reference. + * + * @return true, if is rep reference + */ + public boolean isRepReference() { + return isRepReference( getFilename(), this.getName() ); + } + + /** + * Checks if is file reference. + * + * @return true, if is file reference + */ + public boolean isFileReference() { + return !isRepReference( getFilename(), this.getName() ); + } + + /** + * Checks if is rep reference. + * + * @param fileName the file name + * @param transName the trans name + * @return true, if is rep reference + */ + public static boolean isRepReference( String fileName, String transName ) { + return Const.isEmpty( fileName ) && !Const.isEmpty( transName ); + } + + /** + * Checks if is file reference. + * + * @param fileName the file name + * @param transName the trans name + * @return true, if is file reference + */ + public static boolean isFileReference( String fileName, String transName ) { + return !isRepReference( fileName, transName ); + } + + /** + * Load xml. + * + * @param jobnode the jobnode + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public void loadXML( Node jobnode, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { + loadXML( jobnode, rep, false, prompter ); + } + + /** + * Load xml. 
+ * + * @param jobnode the jobnode + * @param fname The filename + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public void loadXML( Node jobnode, String fname, Repository rep, OverwritePrompter prompter ) + throws KettleXMLException { + loadXML( jobnode, fname, rep, false, prompter ); + } + + /** + * Load a block of XML from an DOM node. + * + * @param jobnode The node to load from + * @param rep The reference to a repository to load additional information from + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public void loadXML( Node jobnode, Repository rep, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter ) + throws KettleXMLException { + loadXML( jobnode, null, rep, ignoreRepositorySharedObjects, prompter ); + } + + /** + * Load a block of XML from an DOM node. + * + * @param jobnode The node to load from + * @param fname The filename + * @param rep The reference to a repository to load additional information from + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + * @deprecated + */ + @Deprecated + public void loadXML( Node jobnode, String fname, Repository rep, boolean ignoreRepositorySharedObjects, + OverwritePrompter prompter ) throws KettleXMLException { + loadXML( jobnode, fname, rep, null, ignoreRepositorySharedObjects, prompter ); + } + + /** + * Load a block of XML from an DOM node. + * + * @param jobnode The node to load from + * @param fname The filename + * @param rep The reference to a repository to load additional information from + * @param metaStore the MetaStore to use + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public void loadXML( Node jobnode, String fname, Repository rep, IMetaStore metaStore, + boolean ignoreRepositorySharedObjects, OverwritePrompter prompter ) throws KettleXMLException { + Props props = null; + if ( Props.isInitialized() ) { + props = Props.getInstance(); + } + + try { + // clear the jobs; + clear(); + + // If we are not using a repository, we are getting the job from a file + // Set the filename here so it can be used in variables for ALL aspects of the job FIX: PDI-8890 + if ( null == rep ) { + setFilename( fname ); + } + + // + // get job info: + // + setName( XMLHandler.getTagValue( jobnode, "name" ) ); + + // Optionally load the repository directory... 
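+      // When a repository is available, resolve the stored directory path; fall back to the repository root if it no longer exists.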
+ // + if ( rep != null ) { + String directoryPath = XMLHandler.getTagValue( jobnode, "directory" ); + if ( directoryPath != null ) { + directory = rep.findDirectory( directoryPath ); + if ( directory == null ) { // not found + directory = new RepositoryDirectory(); // The root as default + } + } + } + + // description + description = XMLHandler.getTagValue( jobnode, "description" ); + + // extended description + extendedDescription = XMLHandler.getTagValue( jobnode, "extended_description" ); + + // job version + jobVersion = XMLHandler.getTagValue( jobnode, "job_version" ); + + // job status + jobStatus = Const.toInt( XMLHandler.getTagValue( jobnode, "job_status" ), -1 ); + + // Created user/date + createdUser = XMLHandler.getTagValue( jobnode, "created_user" ); + String createDate = XMLHandler.getTagValue( jobnode, "created_date" ); + + if ( createDate != null ) { + createdDate = XMLHandler.stringToDate( createDate ); + } + + // Changed user/date + modifiedUser = XMLHandler.getTagValue( jobnode, "modified_user" ); + String modDate = XMLHandler.getTagValue( jobnode, "modified_date" ); + if ( modDate != null ) { + modifiedDate = XMLHandler.stringToDate( modDate ); + } + + // Load the default list of databases + // Read objects from the shared XML file & the repository + try { + sharedObjectsFile = XMLHandler.getTagValue( jobnode, "shared_objects_file" ); + if ( rep == null || ignoreRepositorySharedObjects ) { + sharedObjects = readSharedObjects(); + } else { + sharedObjects = rep.readJobMetaSharedObjects( this ); + } + } catch ( Exception e ) { + LogChannel.GENERAL + .logError( BaseMessages.getString( PKG, "JobMeta.ErrorReadingSharedObjects.Message", e.toString() ) ); + LogChannel.GENERAL.logError( Const.getStackTracker( e ) ); + } + + // Load the database connections, slave servers, cluster schemas & partition schemas into this object. + // + importFromMetaStore(); + + // Read the named parameters. + Node paramsNode = XMLHandler.getSubNode( jobnode, XML_TAG_PARAMETERS ); + int nrParams = XMLHandler.countNodes( paramsNode, "parameter" ); + + for ( int i = 0; i < nrParams; i++ ) { + Node paramNode = XMLHandler.getSubNodeByNr( paramsNode, "parameter", i ); + + String paramName = XMLHandler.getTagValue( paramNode, "name" ); + String defValue = XMLHandler.getTagValue( paramNode, "default_value" ); + String descr = XMLHandler.getTagValue( paramNode, "description" ); + + addParameterDefinition( paramName, defValue, descr ); + } + + // + // Read the database connections + // + int nr = XMLHandler.countNodes( jobnode, "connection" ); + Set privateDatabases = new HashSet( nr ); + for ( int i = 0; i < nr; i++ ) { + Node dbnode = XMLHandler.getSubNodeByNr( jobnode, "connection", i ); + DatabaseMeta dbcon = new DatabaseMeta( dbnode ); + dbcon.shareVariablesWith( this ); + if ( !dbcon.isShared() ) { + privateDatabases.add( dbcon.getName() ); + } + + DatabaseMeta exist = findDatabase( dbcon.getName() ); + if ( exist == null ) { + addDatabase( dbcon ); + } else { + if ( !exist.isShared() ) { + // skip shared connections + if ( shouldOverwrite( prompter, props, + BaseMessages.getString( PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.Message", dbcon.getName() ), + BaseMessages.getString( PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage" ) ) ) { + int idx = indexOfDatabase( exist ); + removeDatabase( idx ); + addDatabase( idx, dbcon ); + } + } + } + } + setPrivateDatabases( privateDatabases ); + + // Read the slave servers... 
+ // + Node slaveServersNode = XMLHandler.getSubNode( jobnode, XML_TAG_SLAVESERVERS ); + int nrSlaveServers = XMLHandler.countNodes( slaveServersNode, SlaveServer.XML_TAG ); + for ( int i = 0; i < nrSlaveServers; i++ ) { + Node slaveServerNode = XMLHandler.getSubNodeByNr( slaveServersNode, SlaveServer.XML_TAG, i ); + SlaveServer slaveServer = new SlaveServer( slaveServerNode ); + slaveServer.shareVariablesWith( this ); + + // Check if the object exists and if it's a shared object. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + SlaveServer check = findSlaveServer( slaveServer.getName() ); + if ( check != null ) { + if ( !check.isShared() ) { + // we don't overwrite shared objects. + if ( shouldOverwrite( prompter, props, BaseMessages + .getString( PKG, "JobMeta.Dialog.SlaveServerExistsOverWrite.Message", slaveServer.getName() ), + BaseMessages.getString( PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage" ) ) ) { + addOrReplaceSlaveServer( slaveServer ); + } + } + } else { + slaveServers.add( slaveServer ); + } + } + + /* + * Get the log database connection & log table + */ + // Backward compatibility... + // + Node jobLogNode = XMLHandler.getSubNode( jobnode, JobLogTable.XML_TAG ); + if ( jobLogNode == null ) { + // Load the XML + // + jobLogTable.setConnectionName( XMLHandler.getTagValue( jobnode, "logconnection" ) ); + jobLogTable.setTableName( XMLHandler.getTagValue( jobnode, "logtable" ) ); + jobLogTable.setBatchIdUsed( "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "use_batchid" ) ) ); + jobLogTable.setLogFieldUsed( "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "use_logfield" ) ) ); + jobLogTable.findField( JobLogTable.ID.CHANNEL_ID ).setEnabled( false ); + jobLogTable.findField( JobLogTable.ID.LINES_REJECTED ).setEnabled( false ); + } else { + jobLogTable.loadXML( jobLogNode, databases, null ); + } + + Node channelLogTableNode = XMLHandler.getSubNode( jobnode, ChannelLogTable.XML_TAG ); + if ( channelLogTableNode != null ) { + channelLogTable.loadXML( channelLogTableNode, databases, null ); + } + jobEntryLogTable.loadXML( jobnode, databases, null ); + + for ( LogTableInterface extraLogTable : extraLogTables ) { + extraLogTable.loadXML( jobnode, databases, null ); + } + + batchIdPassed = "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "pass_batchid" ) ); + + /* + * read the job entries... + */ + Node entriesnode = XMLHandler.getSubNode( jobnode, "entries" ); + int tr = XMLHandler.countNodes( entriesnode, "entry" ); + for ( int i = 0; i < tr; i++ ) { + Node entrynode = XMLHandler.getSubNodeByNr( entriesnode, "entry", i ); + // System.out.println("Reading entry:\n"+entrynode); + + JobEntryCopy je = new JobEntryCopy( entrynode, databases, slaveServers, rep, metaStore ); + + if ( je.isSpecial() && je.isMissing() ) { + addMissingEntry( (MissingEntry) je.getEntry() ); + } + JobEntryCopy prev = findJobEntry( je.getName(), 0, true ); + if ( prev != null ) { + // See if the #0 (root entry) already exists! + // + if ( je.getNr() == 0 ) { + + // Replace previous version with this one: remove it first + // + int idx = indexOfJobEntry( prev ); + removeJobEntry( idx ); + + } else if ( je.getNr() > 0 ) { + + // Use previously defined JobEntry info! + // + je.setEntry( prev.getEntry() ); + + // See if entry already exists... + prev = findJobEntry( je.getName(), je.getNr(), true ); + if ( prev != null ) { + // remove the old one! 
+ // + int idx = indexOfJobEntry( prev ); + removeJobEntry( idx ); + } + } + } + // Add the JobEntryCopy... + addJobEntry( je ); + } + + Node hopsnode = XMLHandler.getSubNode( jobnode, "hops" ); + int ho = XMLHandler.countNodes( hopsnode, "hop" ); + for ( int i = 0; i < ho; i++ ) { + Node hopnode = XMLHandler.getSubNodeByNr( hopsnode, "hop", i ); + JobHopMeta hi = new JobHopMeta( hopnode, this ); + jobhops.add( hi ); + } + + // Read the notes... + Node notepadsnode = XMLHandler.getSubNode( jobnode, "notepads" ); + int nrnotes = XMLHandler.countNodes( notepadsnode, "notepad" ); + for ( int i = 0; i < nrnotes; i++ ) { + Node notepadnode = XMLHandler.getSubNodeByNr( notepadsnode, "notepad", i ); + NotePadMeta ni = new NotePadMeta( notepadnode ); + notes.add( ni ); + } + + // Load the attribute groups map + // + attributesMap = AttributesUtil.loadAttributes( XMLHandler.getSubNode( jobnode, AttributesUtil.XML_TAG ) ); + + ExtensionPointHandler.callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.JobMetaLoaded.id, this ); + + clearChanged(); + } catch ( Exception e ) { + throw new KettleXMLException( BaseMessages.getString( PKG, "JobMeta.Exception.UnableToLoadJobFromXMLNode" ), e ); + } finally { + setInternalKettleVariables(); + } + } + + /** + * Read shared objects. + * + * @return the shared objects + * @throws KettleException the kettle exception + */ + public SharedObjects readSharedObjects() throws KettleException { + // Extract the shared steps, connections, etc. using the SharedObjects + // class + // + String soFile = environmentSubstitute( sharedObjectsFile ); + SharedObjects sharedObjects = new SharedObjects( soFile ); + Map objectsMap = sharedObjects.getObjectsMap(); + + // First read the databases... + // We read databases & slaves first because there might be dependencies + // that need to be resolved. + // + for ( SharedObjectInterface object : objectsMap.values() ) { + if ( object instanceof DatabaseMeta ) { + DatabaseMeta databaseMeta = (DatabaseMeta) object; + databaseMeta.shareVariablesWith( this ); + addOrReplaceDatabase( databaseMeta ); + } else if ( object instanceof SlaveServer ) { + SlaveServer slaveServer = (SlaveServer) object; + slaveServer.shareVariablesWith( this ); + addOrReplaceSlaveServer( slaveServer ); + } + } + + return sharedObjects; + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() + */ + public void saveSharedObjects() throws KettleException { + try { + // First load all the shared objects... + String soFile = environmentSubstitute( sharedObjectsFile ); + SharedObjects sharedObjects = new SharedObjects( soFile ); + + // Now overwrite the objects in there + List shared = new ArrayList(); + shared.addAll( databases ); + shared.addAll( slaveServers ); + + // The databases connections... + for ( int i = 0; i < shared.size(); i++ ) { + SharedObjectInterface sharedObject = (SharedObjectInterface) shared.get( i ); + if ( sharedObject.isShared() ) { + sharedObjects.storeObject( sharedObject ); + } + } + + // Save the objects + sharedObjects.saveToFile(); + } catch ( Exception e ) { + throw new KettleException( "Unable to save shared ojects", e ); + } + } + + /** + * Gets the job entry copy. 
+ * + * @param x the x + * @param y the y + * @param iconsize the iconsize + * @return the job entry copy + */ + public JobEntryCopy getJobEntryCopy( int x, int y, int iconsize ) { + int i, s; + s = nrJobEntries(); + for ( i = s - 1; i >= 0; i-- ) { + // Back to front because drawing goes from start to end + + JobEntryCopy je = getJobEntry( i ); + Point p = je.getLocation(); + if ( p != null ) { + if ( x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize ) { + return je; + } + } + } + return null; + } + + /** + * Nr job entries. + * + * @return the int + */ + public int nrJobEntries() { + return jobcopies.size(); + } + + /** + * Nr job hops. + * + * @return the int + */ + public int nrJobHops() { + return jobhops.size(); + } + + /** + * Gets the job hop. + * + * @param i the i + * @return the job hop + */ + public JobHopMeta getJobHop( int i ) { + return jobhops.get( i ); + } + + /** + * Gets the job entry. + * + * @param i the i + * @return the job entry + */ + public JobEntryCopy getJobEntry( int i ) { + return jobcopies.get( i ); + } + + /** + * Adds the job entry. + * + * @param je the je + */ + public void addJobEntry( JobEntryCopy je ) { + jobcopies.add( je ); + je.setParentJobMeta( this ); + setChanged(); + } + + /** + * Adds the job hop. + * + * @param hi the hi + */ + public void addJobHop( JobHopMeta hi ) { + jobhops.add( hi ); + setChanged(); + } + + /** + * Adds the job entry. + * + * @param p the p + * @param si the si + */ + public void addJobEntry( int p, JobEntryCopy si ) { + jobcopies.add( p, si ); + changedEntries = true; + } + + /** + * Adds the job hop. + * + * @param p the p + * @param hi the hi + */ + public void addJobHop( int p, JobHopMeta hi ) { + try { + jobhops.add( p, hi ); + } catch ( IndexOutOfBoundsException e ) { + jobhops.add( hi ); + } + changedHops = true; + } + + /** + * Removes the job entry. + * + * @param i the i + */ + public void removeJobEntry( int i ) { + JobEntryCopy deleted = jobcopies.remove( i ); + if ( deleted != null ) { + if ( deleted.getEntry() instanceof MissingEntry ) { + removeMissingEntry( (MissingEntry) deleted.getEntry() ); + } + } + setChanged(); + } + + /** + * Removes the job hop. + * + * @param i the i + */ + public void removeJobHop( int i ) { + jobhops.remove( i ); + setChanged(); + } + + /** + * Removes a hop from the transformation. Also marks that the + * transformation's hops have changed. + * + * @param hop The hop to remove from the list of hops + */ + public void removeJobHop( JobHopMeta hop ) { + jobhops.remove( hop ); + setChanged(); + } + + /** + * Index of job hop. + * + * @param he the he + * @return the int + */ + public int indexOfJobHop( JobHopMeta he ) { + return jobhops.indexOf( he ); + } + + /** + * Index of job entry. + * + * @param ge the ge + * @return the int + */ + public int indexOfJobEntry( JobEntryCopy ge ) { + return jobcopies.indexOf( ge ); + } + + /** + * Sets the job entry. + * + * @param idx the idx + * @param jec the jec + */ + public void setJobEntry( int idx, JobEntryCopy jec ) { + jobcopies.set( idx, jec ); + } + + /** + * Find an existing JobEntryCopy by it's name and number + * + * @param name The name of the job entry copy + * @param nr The number of the job entry copy + * @return The JobEntryCopy or null if nothing was found! 
+ */ + public JobEntryCopy findJobEntry( String name, int nr, boolean searchHiddenToo ) { + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy jec = getJobEntry( i ); + if ( jec.getName().equalsIgnoreCase( name ) && jec.getNr() == nr ) { + if ( searchHiddenToo || jec.isDrawn() ) { + return jec; + } + } + } + return null; + } + + /** + * Find job entry. + * + * @param full_name_nr the full_name_nr + * @return the job entry copy + */ + public JobEntryCopy findJobEntry( String full_name_nr ) { + int i; + for ( i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy jec = getJobEntry( i ); + JobEntryInterface je = jec.getEntry(); + if ( je.toString().equalsIgnoreCase( full_name_nr ) ) { + return jec; + } + } + return null; + } + + /** + * Find job hop. + * + * @param name the name + * @return the job hop meta + */ + public JobHopMeta findJobHop( String name ) { + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.toString().equalsIgnoreCase( name ) ) { + return hi; + } + } + return null; + } + + /** + * Find job hop from. + * + * @param jge the jge + * @return the job hop meta + */ + public JobHopMeta findJobHopFrom( JobEntryCopy jge ) { + if ( jge != null ) { + for ( JobHopMeta hi : jobhops ) { + + // Return the first we find! + // + if ( hi != null && ( hi.getFromEntry() != null ) && hi.getFromEntry().equals( jge ) ) { + return hi; + } + } + } + return null; + } + + /** + * Find job hop. + * + * @param from the from + * @param to the to + * @return the job hop meta + */ + public JobHopMeta findJobHop( JobEntryCopy from, JobEntryCopy to ) { + return findJobHop( from, to, false ); + } + + /** + * Find job hop. + * + * @param from the from + * @param to the to + * @param includeDisabled the include disabled + * @return the job hop meta + */ + public JobHopMeta findJobHop( JobEntryCopy from, JobEntryCopy to, boolean includeDisabled ) { + for ( JobHopMeta hi : jobhops ) { + if ( hi.isEnabled() || includeDisabled ) { + if ( hi != null && hi.getFromEntry() != null && hi.getToEntry() != null && hi.getFromEntry().equals( from ) + && hi.getToEntry().equals( to ) ) { + return hi; + } + } + } + return null; + } + + /** + * Find job hop to. + * + * @param jge the jge + * @return the job hop meta + */ + public JobHopMeta findJobHopTo( JobEntryCopy jge ) { + for ( JobHopMeta hi : jobhops ) { + if ( hi != null && hi.getToEntry() != null && hi.getToEntry().equals( jge ) ) { + // Return the first! + return hi; + } + } + return null; + } + + /** + * Find nr prev job entries. + * + * @param from the from + * @return the int + */ + public int findNrPrevJobEntries( JobEntryCopy from ) { + return findNrPrevJobEntries( from, false ); + } + + /** + * Find prev job entry. + * + * @param to the to + * @param nr the nr + * @return the job entry copy + */ + public JobEntryCopy findPrevJobEntry( JobEntryCopy to, int nr ) { + return findPrevJobEntry( to, nr, false ); + } + + /** + * Find nr prev job entries. + * + * @param to the to + * @param info the info + * @return the int + */ + public int findNrPrevJobEntries( JobEntryCopy to, boolean info ) { + int count = 0; + + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.isEnabled() && hi.getToEntry().equals( to ) ) { + count++; + } + } + return count; + } + + /** + * Find prev job entry. 
+ * + * @param to the to + * @param nr the nr + * @param info the info + * @return the job entry copy + */ + public JobEntryCopy findPrevJobEntry( JobEntryCopy to, int nr, boolean info ) { + int count = 0; + + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.isEnabled() && hi.getToEntry().equals( to ) ) { + if ( count == nr ) { + return hi.getFromEntry(); + } + count++; + } + } + return null; + } + + /** + * Find nr next job entries. + * + * @param from the from + * @return the int + */ + public int findNrNextJobEntries( JobEntryCopy from ) { + int count = 0; + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.isEnabled() && ( hi.getFromEntry() != null ) && hi.getFromEntry().equals( from ) ) { + count++; + } + } + return count; + } + + /** + * Find next job entry. + * + * @param from the from + * @param cnt the cnt + * @return the job entry copy + */ + public JobEntryCopy findNextJobEntry( JobEntryCopy from, int cnt ) { + int count = 0; + + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.isEnabled() && ( hi.getFromEntry() != null ) && hi.getFromEntry().equals( from ) ) { + if ( count == cnt ) { + return hi.getToEntry(); + } + count++; + } + } + return null; + } + + /** + * Checks for loop. + * + * @param entry the entry + * @return true, if successful + */ + public boolean hasLoop( JobEntryCopy entry ) { + clearLoopCache(); + return hasLoop( entry, null, true ) || hasLoop( entry, null, false ); + } + + /** + * Checks for loop. + * + * @param entry the entry + * @param lookup the lookup + * @return true, if successful + */ + public boolean hasLoop( JobEntryCopy entry, JobEntryCopy lookup, boolean info ) { + String cacheKey = + entry.getName() + " - " + ( lookup != null ? lookup.getName() : "" ) + " - " + ( info ? "true" : "false" ); + + Boolean loop = loopCache.get( cacheKey ); + if ( loop != null ) { + return loop.booleanValue(); + } + + boolean hasLoop = false; + + int nr = findNrPrevJobEntries( entry, info ); + for ( int i = 0; i < nr && !hasLoop; i++ ) { + JobEntryCopy prevJobMeta = findPrevJobEntry( entry, i, info ); + if ( prevJobMeta != null ) { + if ( prevJobMeta.equals( entry ) ) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if ( prevJobMeta.equals( lookup ) ) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if ( hasLoop( prevJobMeta, lookup == null ? entry : lookup, info ) ) { + hasLoop = true; + break; // no need to check more but caching this one below + } + } + } + // Store in the cache... + // + loopCache.put( cacheKey, Boolean.valueOf( hasLoop ) ); + return hasLoop; + } + + /** + * Clears the loop cache. + */ + private void clearLoopCache() { + loopCache.clear(); + } + + /** + * Checks if is entry used in hops. + * + * @param jge the jge + * @return true, if is entry used in hops + */ + public boolean isEntryUsedInHops( JobEntryCopy jge ) { + JobHopMeta fr = findJobHopFrom( jge ); + JobHopMeta to = findJobHopTo( jge ); + if ( fr != null || to != null ) { + return true; + } + return false; + } + + /** + * Count entries. + * + * @param name the name + * @return the int + */ + public int countEntries( String name ) { + int count = 0; + int i; + for ( i = 0; i < nrJobEntries(); i++ ) { + // Look at all the hops; + + JobEntryCopy je = getJobEntry( i ); + if ( je.getName().equalsIgnoreCase( name ) ) { + count++; + } + } + return count; + } + + /** + * Find unused nr. 
+ * + * @param name the name + * @return the int + */ + public int findUnusedNr( String name ) { + int nr = 1; + JobEntryCopy je = findJobEntry( name, nr, true ); + while ( je != null ) { + nr++; + // log.logDebug("findUnusedNr()", "Trying unused nr: "+nr); + je = findJobEntry( name, nr, true ); + } + return nr; + } + + /** + * Find max nr. + * + * @param name the name + * @return the int + */ + public int findMaxNr( String name ) { + int max = 0; + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy je = getJobEntry( i ); + if ( je.getName().equalsIgnoreCase( name ) ) { + if ( je.getNr() > max ) { + max = je.getNr(); + } + } + } + return max; + } + + /** + * Proposes an alternative job entry name when the original already exists... + * + * @param entryname The job entry name to find an alternative for.. + * @return The alternative stepname. + */ + public String getAlternativeJobentryName( String entryname ) { + String newname = entryname; + JobEntryCopy jec = findJobEntry( newname ); + int nr = 1; + while ( jec != null ) { + nr++; + newname = entryname + " " + nr; + jec = findJobEntry( newname ); + } + + return newname; + } + + /** + * Gets the all job graph entries. + * + * @param name the name + * @return the all job graph entries + */ + public JobEntryCopy[] getAllJobGraphEntries( String name ) { + int count = 0; + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy je = getJobEntry( i ); + if ( je.getName().equalsIgnoreCase( name ) ) { + count++; + } + } + JobEntryCopy[] retval = new JobEntryCopy[count]; + + count = 0; + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy je = getJobEntry( i ); + if ( je.getName().equalsIgnoreCase( name ) ) { + retval[count] = je; + count++; + } + } + return retval; + } + + /** + * Gets the all job hops using. + * + * @param name the name + * @return the all job hops using + */ + public JobHopMeta[] getAllJobHopsUsing( String name ) { + List hops = new ArrayList(); + + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.getFromEntry() != null && hi.getToEntry() != null ) { + if ( hi.getFromEntry().getName().equalsIgnoreCase( name ) || hi.getToEntry().getName() + .equalsIgnoreCase( name ) ) { + hops.add( hi ); + } + } + } + return hops.toArray( new JobHopMeta[hops.size()] ); + } + + public boolean isPathExist( JobEntryInterface from, JobEntryInterface to ) { + for ( JobHopMeta hi : jobhops ) { + if ( hi.getFromEntry() != null && hi.getToEntry() != null ) { + if ( hi.getFromEntry().getName().equalsIgnoreCase( from.getName() ) ) { + if ( hi.getToEntry().getName().equalsIgnoreCase( to.getName() ) ) { + return true; + } + if ( isPathExist( hi.getToEntry().getEntry(), to ) ) { + return true; + } + } + } + } + + return false; + } + + /** + * Select all. + */ + public void selectAll() { + int i; + for ( i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy ce = getJobEntry( i ); + ce.setSelected( true ); + } + for ( i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + ni.setSelected( true ); + } + setChanged(); + notifyObservers( "refreshGraph" ); + } + + /** + * Unselect all. + */ + public void unselectAll() { + int i; + for ( i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy ce = getJobEntry( i ); + ce.setSelected( false ); + } + for ( i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + ni.setSelected( false ); + } + } + + /** + * Gets the maximum. 
+ * + * @return the maximum + */ + public Point getMaximum() { + int maxx = 0, maxy = 0; + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy entry = getJobEntry( i ); + Point loc = entry.getLocation(); + if ( loc.x > maxx ) { + maxx = loc.x; + } + if ( loc.y > maxy ) { + maxy = loc.y; + } + } + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + Point loc = ni.getLocation(); + if ( loc.x + ni.width > maxx ) { + maxx = loc.x + ni.width; + } + if ( loc.y + ni.height > maxy ) { + maxy = loc.y + ni.height; + } + } + + return new Point( maxx + 100, maxy + 100 ); + } + + /** + * Get the minimum point on the canvas of a job + * + * @return Minimum coordinate of a step in the job + */ + public Point getMinimum() { + int minx = Integer.MAX_VALUE; + int miny = Integer.MAX_VALUE; + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy jobEntryCopy = getJobEntry( i ); + Point loc = jobEntryCopy.getLocation(); + if ( loc.x < minx ) { + minx = loc.x; + } + if ( loc.y < miny ) { + miny = loc.y; + } + } + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta notePadMeta = getNote( i ); + Point loc = notePadMeta.getLocation(); + if ( loc.x < minx ) { + minx = loc.x; + } + if ( loc.y < miny ) { + miny = loc.y; + } + } + + if ( minx > 20 ) { + minx -= 20; + } else { + minx = 0; + } + if ( miny > 20 ) { + miny -= 20; + } else { + miny = 0; + } + + return new Point( minx, miny ); + } + + /** + * Gets the selected locations. + * + * @return the selected locations + */ + public Point[] getSelectedLocations() { + List selectedEntries = getSelectedEntries(); + Point[] retval = new Point[selectedEntries.size()]; + for ( int i = 0; i < retval.length; i++ ) { + JobEntryCopy si = selectedEntries.get( i ); + Point p = si.getLocation(); + retval[i] = new Point( p.x, p.y ); // explicit copy of location + } + return retval; + } + + /** + * Get all the selected note locations + * + * @return The selected step and notes locations. + */ + public Point[] getSelectedNoteLocations() { + List points = new ArrayList(); + + for ( NotePadMeta ni : getSelectedNotes() ) { + Point p = ni.getLocation(); + points.add( new Point( p.x, p.y ) ); // explicit copy of location + } + + return points.toArray( new Point[points.size()] ); + } + + /** + * Gets the selected entries. + * + * @return the selected entries + */ + public List getSelectedEntries() { + List selection = new ArrayList(); + for ( JobEntryCopy je : jobcopies ) { + if ( je.isSelected() ) { + selection.add( je ); + } + } + return selection; + } + + /** + * Gets the entry indexes. + * + * @param entries the entries + * @return the entry indexes + */ + public int[] getEntryIndexes( List entries ) { + int[] retval = new int[entries.size()]; + + for ( int i = 0; i < entries.size(); i++ ) { + retval[i] = indexOfJobEntry( entries.get( i ) ); + } + + return retval; + } + + /** + * Find start. + * + * @return the job entry copy + */ + public JobEntryCopy findStart() { + for ( int i = 0; i < nrJobEntries(); i++ ) { + if ( getJobEntry( i ).isStart() ) { + return getJobEntry( i ); + } + } + return null; + } + + /** + * Gets a textual representation of the job. If its name has been set, it will be returned, otherwise the classname is + * returned. + * + * @return the textual representation of the job. 
+   */
+  public String toString() {
+    if ( !Const.isEmpty( filename ) ) {
+      if ( Const.isEmpty( name ) ) {
+        return filename;
+      } else {
+        return filename + " : " + name;
+      }
+    }
+
+    if ( name != null ) {
+      if ( directory != null ) {
+        String path = directory.getPath();
+        if ( path.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) {
+          return path + name;
+        } else {
+          return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name;
+        }
+      } else {
+        return name;
+      }
+    } else {
+      return JobMeta.class.getName();
+    }
+  }
+
+  /**
+   * Gets the boolean value of batch id passed.
+   *
+   * @return Returns the batchIdPassed.
+   */
+  public boolean isBatchIdPassed() {
+    return batchIdPassed;
+  }
+
+  /**
+   * Sets the batch id passed.
+   *
+   * @param batchIdPassed The batchIdPassed to set.
+   */
+  public void setBatchIdPassed( boolean batchIdPassed ) {
+    this.batchIdPassed = batchIdPassed;
+  }
+
+  public List<SQLStatement> getSQLStatements( Repository repository, ProgressMonitorListener monitor )
+    throws KettleException {
+    return getSQLStatements( repository, null, monitor );
+  }
+
+  /**
+   * Builds a list of all the SQL statements that this transformation needs in order to work properly.
+   *
+   * @return An ArrayList of SQLStatement objects.
+   */
+  public List<SQLStatement> getSQLStatements( Repository repository, IMetaStore metaStore,
+    ProgressMonitorListener monitor ) throws KettleException {
+    if ( monitor != null ) {
+      monitor
+        .beginTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLNeededForThisJob" ), nrJobEntries() + 1 );
+    }
+    List<SQLStatement> stats = new ArrayList<SQLStatement>();
+
+    for ( int i = 0; i < nrJobEntries(); i++ ) {
+      JobEntryCopy copy = getJobEntry( i );
+      if ( monitor != null ) {
+        monitor.subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLForJobEntryCopy" ) + copy + "]" );
+      }
+      stats.addAll( copy.getEntry().getSQLStatements( repository, metaStore, this ) );
+      stats.addAll( compatibleGetEntrySQLStatements( copy.getEntry(), repository ) );
+      stats.addAll( compatibleGetEntrySQLStatements( copy.getEntry(), repository, this ) );
+      if ( monitor != null ) {
+        monitor.worked( 1 );
+      }
+    }
+
+    // Also check the sql for the logtable...
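+    // If a log connection and table name are configured, connect and generate the DDL needed for the job log table.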
+ if ( monitor != null ) { + monitor.subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLStatementsForJobLogTables" ) ); + } + if ( jobLogTable.getDatabaseMeta() != null && !Const.isEmpty( jobLogTable.getTableName() ) ) { + Database db = new Database( this, jobLogTable.getDatabaseMeta() ); + try { + db.connect(); + RowMetaInterface fields = jobLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); + String sql = db.getDDL( jobLogTable.getTableName(), fields ); + if ( sql != null && sql.length() > 0 ) { + SQLStatement stat = new SQLStatement( BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), + jobLogTable.getDatabaseMeta(), sql ); + stats.add( stat ); + } + } catch ( KettleDatabaseException dbe ) { + SQLStatement stat = new SQLStatement( BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), + jobLogTable.getDatabaseMeta(), null ); + stat.setError( + BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ErrorObtainingJobLogTableInfo" ) + dbe.getMessage() ); + stats.add( stat ); + } finally { + db.disconnect(); + } + } + if ( monitor != null ) { + monitor.worked( 1 ); + } + if ( monitor != null ) { + monitor.done(); + } + + return stats; + } + + @SuppressWarnings( "deprecation" ) + private Collection compatibleGetEntrySQLStatements( JobEntryInterface entry, + Repository repository, VariableSpace variableSpace ) throws KettleException { + return entry.getSQLStatements( repository, variableSpace ); + } + + @SuppressWarnings( "deprecation" ) + private Collection compatibleGetEntrySQLStatements( JobEntryInterface entry, + Repository repository ) throws KettleException { + return entry.getSQLStatements( repository ); + } + + /** + * Gets the arguments used for this job. + * + * @return Returns the arguments. + * @deprecated Moved to the Job class + */ + @Deprecated + public String[] getArguments() { + return arguments; + } + + /** + * Sets the arguments. + * + * @param arguments The arguments to set. + * @deprecated moved to the job class + */ + @Deprecated + public void setArguments( String[] arguments ) { + this.arguments = arguments; + } + + /** + * Get a list of all the strings used in this job. + * + * @return A list of StringSearchResult with strings used in the job + */ + public List getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes ) { + List stringList = new ArrayList(); + + if ( searchSteps ) { + // Loop over all steps in the transformation and see what the used + // vars are... + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy entryMeta = getJobEntry( i ); + stringList.add( new StringSearchResult( entryMeta.getName(), entryMeta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.JobEntryName" ) ) ); + if ( entryMeta.getDescription() != null ) { + stringList.add( new StringSearchResult( entryMeta.getDescription(), entryMeta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.JobEntryDescription" ) ) ); + } + JobEntryInterface metaInterface = entryMeta.getEntry(); + StringSearcher.findMetaData( metaInterface, 1, stringList, entryMeta, this ); + } + } + + // Loop over all steps in the transformation and see what the used vars + // are... 
+ if ( searchDatabases ) { + for ( int i = 0; i < nrDatabases(); i++ ) { + DatabaseMeta meta = getDatabase( i ); + stringList.add( new StringSearchResult( meta.getName(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseConnectionName" ) ) ); + if ( meta.getHostname() != null ) { + stringList.add( new StringSearchResult( meta.getHostname(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseHostName" ) ) ); + } + if ( meta.getDatabaseName() != null ) { + stringList.add( new StringSearchResult( meta.getDatabaseName(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseName" ) ) ); + } + if ( meta.getUsername() != null ) { + stringList.add( new StringSearchResult( meta.getUsername(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseUsername" ) ) ); + } + if ( meta.getPluginId() != null ) { + stringList.add( new StringSearchResult( meta.getPluginId(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseTypeDescription" ) ) ); + } + if ( meta.getDatabasePortNumberString() != null ) { + stringList.add( new StringSearchResult( meta.getDatabasePortNumberString(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabasePort" ) ) ); + } + if ( meta.getServername() != null ) { + stringList.add( new StringSearchResult( meta.getServername(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseServer" ) ) ); + } + // if ( includePasswords ) + // { + if ( meta.getPassword() != null ) { + stringList.add( new StringSearchResult( meta.getPassword(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabasePassword" ) ) ); + // } + } + } + } + + // Loop over all steps in the transformation and see what the used vars + // are... + if ( searchNotes ) { + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta meta = getNote( i ); + if ( meta.getNote() != null ) { + stringList.add( new StringSearchResult( meta.getNote(), meta, this, + BaseMessages.getString( PKG, "JobMeta.SearchMetadata.NotepadText" ) ) ); + } + } + } + + return stringList; + } + + /** + * Gets the used variables. + * + * @return the used variables + */ + public List getUsedVariables() { + // Get the list of Strings. + List stringList = getStringList( true, true, false ); + + List varList = new ArrayList(); + + // Look around in the strings, see what we find... + for ( StringSearchResult result : stringList ) { + StringUtil.getUsedVariables( result.getString(), varList, false ); + } + + return varList; + } + + /** + * Have job entries changed. + * + * @return true, if successful + */ + public boolean haveJobEntriesChanged() { + if ( changedEntries ) { + return true; + } + + for ( int i = 0; i < nrJobEntries(); i++ ) { + JobEntryCopy entry = getJobEntry( i ); + if ( entry.hasChanged() ) { + return true; + } + } + return false; + } + + /** + * Have job hops changed. + * + * @return true, if successful + */ + public boolean haveJobHopsChanged() { + if ( changedHops ) { + return true; + } + + for ( JobHopMeta hi : jobhops ) { + // Look at all the hops + + if ( hi.hasChanged() ) { + return true; + } + } + return false; + } + + /** + * Gets the version of the job. + * + * @return The version of the job + */ + public String getJobversion() { + return jobVersion; + } + + /** + * Gets the status of the job. + * + * @return the status of the job + */ + public int getJobstatus() { + return jobStatus; + } + + /** + * Set the version of the job. 
+ * + * @param jobVersion The new version description of the job + */ + public void setJobversion( String jobVersion ) { + this.jobVersion = jobVersion; + } + + /** + * Set the status of the job. + * + * @param jobStatus The new status description of the job + */ + public void setJobstatus( int jobStatus ) { + this.jobStatus = jobStatus; + } + + /** + * Find a jobentry with a certain ID in a list of job entries. + * + * @param jobentries The List of jobentries + * @param id_jobentry The id of the jobentry + * @return The JobEntry object if one was found, null otherwise. + */ + public static final JobEntryInterface findJobEntry( List jobentries, ObjectId id_jobentry ) { + if ( jobentries == null ) { + return null; + } + + for ( JobEntryInterface je : jobentries ) { + if ( je.getObjectId() != null && je.getObjectId().equals( id_jobentry ) ) { + return je; + } + } + return null; + } + + /** + * Find a jobentrycopy with a certain ID in a list of job entry copies. + * + * @param jobcopies The List of jobentry copies + * @param id_jobentry_copy The id of the jobentry copy + * @return The JobEntryCopy object if one was found, null otherwise. + */ + public static final JobEntryCopy findJobEntryCopy( List jobcopies, ObjectId id_jobentry_copy ) { + if ( jobcopies == null ) { + return null; + } + + for ( JobEntryCopy jec : jobcopies ) { + if ( jec.getObjectId() != null && jec.getObjectId().equals( id_jobentry_copy ) ) { + return jec; + } + } + return null; + } + + /** + * This method sets various internal kettle variables that can be used by the transformation. + */ + @Override + public void setInternalKettleVariables( VariableSpace var ) { + setInternalFilenameKettleVariables( var ); + setInternalNameKettleVariable( var ); + + // The name of the directory in the repository + variables + .setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, directory != null ? directory.getPath() : "" ); + + boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null; + + // setup fallbacks + if ( hasRepoDir ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, + variables.getVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) ); + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, + variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); + } + + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + repository != null ? Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY + : Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); + } + + /** + * Sets the internal name kettle variable. + * + * @param var the new internal name kettle variable + */ + @Override + protected void setInternalNameKettleVariable( VariableSpace var ) { + // The name of the job + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, Const.NVL( name, "" ) ); + } + + /** + * Sets the internal filename kettle variables. + * + * @param var the new internal filename kettle variables + */ + @Override + protected void setInternalFilenameKettleVariables( VariableSpace var ) { + if ( filename != null ) { + // we have a filename that's defined. 
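+      // Resolve it through VFS so the base name and parent directory can be exposed as internal variables.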
+ try { + FileObject fileObject = KettleVFS.getFileObject( filename, var ); + FileName fileName = fileObject.getName(); + + // The filename of the job + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, fileName.getBaseName() ); + + // The directory of the job + FileName fileDir = fileName.getParent(); + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, fileDir.getURI() ); + } catch ( Exception e ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); + } + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); + } + } + + @Deprecated + public void checkJobEntries( List remarks, boolean only_selected, + ProgressMonitorListener monitor ) { + checkJobEntries( remarks, only_selected, monitor, this, null, null ); + } + + /** + * Check all job entries within the job. Each Job Entry has the opportunity to check their own settings. + * + * @param remarks List of CheckResult remarks inserted into by each JobEntry + * @param only_selected true if you only want to check the selected jobs + * @param monitor Progress monitor (not presently in use) + */ + public void checkJobEntries( List remarks, boolean only_selected, + ProgressMonitorListener monitor, VariableSpace space, Repository repository, IMetaStore metaStore ) { + remarks.clear(); // Empty remarks + if ( monitor != null ) { + monitor.beginTask( BaseMessages.getString( PKG, "JobMeta.Monitor.VerifyingThisJobEntryTask.Title" ), + jobcopies.size() + 2 ); + } + boolean stop_checking = false; + for ( int i = 0; i < jobcopies.size() && !stop_checking; i++ ) { + JobEntryCopy copy = jobcopies.get( i ); // get the job entry copy + if ( ( !only_selected ) || ( only_selected && copy.isSelected() ) ) { + JobEntryInterface entry = copy.getEntry(); + if ( entry != null ) { + if ( monitor != null ) { + monitor + .subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.VerifyingJobEntry.Title", entry.getName() ) ); + } + entry.check( remarks, this, space, repository, metaStore ); + compatibleEntryCheck( entry, remarks ); + if ( monitor != null ) { + monitor.worked( 1 ); // progress bar... + if ( monitor.isCanceled() ) { + stop_checking = true; + } + } + } + } + if ( monitor != null ) { + monitor.worked( 1 ); + } + } + if ( monitor != null ) { + monitor.done(); + } + } + + @SuppressWarnings( "deprecation" ) + private void compatibleEntryCheck( JobEntryInterface entry, List remarks ) { + entry.check( remarks, this ); + } + + /** + * Gets the resource dependencies. + * + * @return the resource dependencies + */ + public List getResourceDependencies() { + List resourceReferences = new ArrayList(); + JobEntryCopy copy = null; + JobEntryInterface entry = null; + for ( int i = 0; i < jobcopies.size(); i++ ) { + copy = jobcopies.get( i ); // get the job entry copy + entry = copy.getEntry(); + resourceReferences.addAll( entry.getResourceDependencies( this ) ); + } + + return resourceReferences; + } + + public String exportResources( VariableSpace space, Map definitions, + ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { + String resourceName = null; + try { + // Handle naming for both repository and XML bases resources... 
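+      // Repository-based jobs derive the exported name from the repository path; file-based jobs use the VFS filename.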
+ // + String baseName; + String originalPath; + String fullname; + String extension = "kjb"; + if ( Const.isEmpty( getFilename() ) ) { + // Assume repository... + // + originalPath = directory.getPath(); + baseName = getName(); + fullname = + directory.getPath() + ( directory.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ? "" + : RepositoryDirectory.DIRECTORY_SEPARATOR ) + getName() + "." + extension; // + } else { + // Assume file + // + FileObject fileObject = KettleVFS.getFileObject( space.environmentSubstitute( getFilename() ), space ); + originalPath = fileObject.getParent().getName().getPath(); + baseName = fileObject.getName().getBaseName(); + fullname = fileObject.getName().getPath(); + } + + resourceName = namingInterface + .nameResource( baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.JOB ); + ResourceDefinition definition = definitions.get( resourceName ); + if ( definition == null ) { + // If we do this once, it will be plenty :-) + // + JobMeta jobMeta = (JobMeta) this.realClone( false ); + + // All objects get re-located to the root folder, + // but, when exporting, we need to see current directory + // in order to make 'Internal.Entry.Current.Directory' variable work + jobMeta.setRepositoryDirectory( directory ); + + // Add used resources, modify transMeta accordingly + // Go through the list of steps, etc. + // These critters change the steps in the cloned TransMeta + // At the end we make a new XML version of it in "exported" + // format... + + // loop over steps, databases will be exported to XML anyway. + // + for ( JobEntryCopy jobEntry : jobMeta.jobcopies ) { + compatibleJobEntryExportResources( jobEntry.getEntry(), jobMeta, definitions, namingInterface, repository ); + jobEntry.getEntry().exportResources( jobMeta, definitions, namingInterface, repository, metaStore ); + } + + // Set a number of parameters for all the data files referenced so far... + // + Map directoryMap = namingInterface.getDirectoryMap(); + if ( directoryMap != null ) { + for ( String directory : directoryMap.keySet() ) { + String parameterName = directoryMap.get( directory ); + jobMeta.addParameterDefinition( parameterName, directory, "Data file path discovered during export" ); + } + } + + // At the end, add ourselves to the map... + // + String jobMetaContent = jobMeta.getXML(); + + definition = new ResourceDefinition( resourceName, jobMetaContent ); + + // Also remember the original filename (if any), including variables etc. + // + if ( Const.isEmpty( this.getFilename() ) ) { // Repository + definition.setOrigin( fullname ); + } else { + definition.setOrigin( this.getFilename() ); + } + + definitions.put( fullname, definition ); + } + } catch ( FileSystemException e ) { + throw new KettleException( + BaseMessages.getString( PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", getFilename() ), e ); + } catch ( KettleFileException e ) { + throw new KettleException( + BaseMessages.getString( PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", getFilename() ), e ); + } + + return resourceName; + } + + @SuppressWarnings( "deprecation" ) + private void compatibleJobEntryExportResources( JobEntryInterface entry, JobMeta jobMeta, + Map definitions, ResourceNamingInterface namingInterface, Repository repository2 ) + throws KettleException { + entry.exportResources( jobMeta, definitions, namingInterface, repository ); + } + + /** + * See if the name of the supplied job entry copy doesn't collide with any other job entry copy in the job. 
+ * + * @param je The job entry copy to verify the name for. + */ + public void renameJobEntryIfNameCollides( JobEntryCopy je ) { + // First see if the name changed. + // If so, we need to verify that the name is not already used in the + // job. + // + String newname = je.getName(); + + // See if this name exists in the other job entries + // + boolean found; + int nr = 1; + do { + found = false; + for ( JobEntryCopy copy : jobcopies ) { + if ( copy != je && copy.getName().equalsIgnoreCase( newname ) && copy.getNr() == 0 ) { + found = true; + } + } + if ( found ) { + nr++; + newname = je.getName() + " (" + nr + ")"; + } + } while ( found ); + + // Rename if required. + // + je.setName( newname ); + } + + /** + * Gets the job copies. + * + * @return the job copies + */ + public List getJobCopies() { + return jobcopies; + } + + /** + * Gets the jobhops. + * + * @return the jobhops + */ + public List getJobhops() { + return jobhops; + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() + */ + public RepositoryObjectType getRepositoryElementType() { + return REPOSITORY_ELEMENT_TYPE; + } + + /** + * Create a unique list of job entry interfaces + * + * @return + */ + public List composeJobEntryInterfaceList() { + List list = new ArrayList(); + + for ( JobEntryCopy copy : jobcopies ) { + if ( !list.contains( copy.getEntry() ) ) { + list.add( copy.getEntry() ); + } + } + + return list; + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() + */ + public String getLogChannelId() { + return null; + } + + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.JOBMETA; + } + + /** + * Gets the job entry log table. + * + * @return the jobEntryLogTable + */ + public JobEntryLogTable getJobEntryLogTable() { + return jobEntryLogTable; + } + + /** + * Sets the job entry log table. + * + * @param jobEntryLogTable the jobEntryLogTable to set + */ + public void setJobEntryLogTable( JobEntryLogTable jobEntryLogTable ) { + this.jobEntryLogTable = jobEntryLogTable; + } + + /** + * Gets the log tables. + * + * @return the log tables + */ + public List getLogTables() { + List logTables = new ArrayList(); + logTables.add( jobLogTable ); + logTables.add( jobEntryLogTable ); + logTables.add( channelLogTable ); + logTables.addAll( extraLogTables ); + return logTables; + } + + /** + * Checks whether the job has repository references. + * + * @return true if the job has repository references, false otherwise + */ + public boolean hasRepositoryReferences() { + for ( JobEntryCopy copy : jobcopies ) { + if ( copy.getEntry().hasRepositoryReferences() ) { + return true; + } + } + return false; + } + + /** + * Look up the references after import + * + * @param repository the repository to reference. + */ + public void lookupRepositoryReferences( Repository repository ) throws KettleException { + KettleException lastThrownException = null; + Map notFoundedReferences = new HashMap<>(); + for ( JobEntryCopy copy : jobcopies ) { + if ( copy.getEntry().hasRepositoryReferences() ) { + try { + copy.getEntry().lookupRepositoryReferences( repository ); + } catch ( IdNotFoundException e ) { + lastThrownException = e; + String path = e.getPathToObject(); + String name = e.getObjectName(); + String key = StringUtils.isEmpty( path ) || path.equals( "null" ) ? 
name : path + "/" + name; + notFoundedReferences.put( key, e.getObjectType() ); + } + } + } + if ( lastThrownException != null && !notFoundedReferences.isEmpty() ) { + throw new LookupReferencesException( lastThrownException, notFoundedReferences ); + } + } + + /** + * Returns whether or not the job is gathering metrics. For a JobMeta this is always false. + * + * @return is gathering metrics = false; + */ + @Override + public boolean isGatheringMetrics() { + return false; + } + + /** + * Sets whether or not the job is gathering metrics. This is a stub with not executable code. + */ + @Override + public void setGatheringMetrics( boolean gatheringMetrics ) { + } + + @Override + public boolean isForcingSeparateLogging() { + return false; + } + + @Override + public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { + } + + /** + * This method needs to be called to store those objects which are used and referenced in the job metadata but not + * saved in the serialization. + * + * @param metaStore The store to save to + * @throws MetaStoreException in case there is an error. + */ + public void saveMetaStoreObjects( Repository repository, IMetaStore metaStore ) throws MetaStoreException { + } + + public List getExtraLogTables() { + return extraLogTables; + } + + public void setExtraLogTables( List extraLogTables ) { + this.extraLogTables = extraLogTables; + } + + public boolean containsJobCopy( JobEntryCopy jobCopy ) { + return jobcopies.contains( jobCopy ); + } + + public List getMissingEntries() { + return missingEntries; + } + + public void addMissingEntry( MissingEntry missingEntry ) { + if ( missingEntries == null ) { + missingEntries = new ArrayList(); + } + missingEntries.add( missingEntry ); + } + + public void removeMissingEntry( MissingEntry missingEntry ) { + if ( missingEntries != null && missingEntry != null && missingEntries.contains( missingEntry ) ) { + missingEntries.remove( missingEntry ); + } + } + + public boolean hasMissingPlugins() { + return missingEntries != null && !missingEntries.isEmpty(); + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java new file mode 100644 index 0000000..b19920a --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java @@ -0,0 +1,1567 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2015 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.job.entries.job; + +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.commons.vfs2.FileObject; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.ObjectLocationSpecificationMethod; +import org.pentaho.di.core.Result; +import org.pentaho.di.core.ResultFile; +import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.logging.LogChannelFileWriter; +import org.pentaho.di.core.logging.LogLevel; +import org.pentaho.di.core.parameters.DuplicateParamException; +import org.pentaho.di.core.parameters.NamedParams; +import org.pentaho.di.core.parameters.NamedParamsDefault; +import org.pentaho.di.core.util.CurrentDirectoryResolver; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.variables.Variables; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.job.DelegationListener; +import org.pentaho.di.job.Job; +import org.pentaho.di.job.JobExecutionConfiguration; +import org.pentaho.di.job.JobMeta; +import org.pentaho.di.job.entry.JobEntryBase; +import org.pentaho.di.job.entry.JobEntryInterface; +import org.pentaho.di.job.entry.validator.AndValidator; +import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.repository.RepositoryDirectory; +import org.pentaho.di.repository.RepositoryDirectoryInterface; +import org.pentaho.di.repository.RepositoryImportLocation; +import org.pentaho.di.repository.RepositoryObject; +import org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.repository.StringObjectId; +import org.pentaho.di.resource.ResourceDefinition; +import org.pentaho.di.resource.ResourceEntry; +import org.pentaho.di.resource.ResourceEntry.ResourceType; +import org.pentaho.di.resource.ResourceNamingInterface; +import org.pentaho.di.resource.ResourceReference; +import org.pentaho.di.www.SlaveServerJobStatus; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/** + * Recursive definition of a Job. This step means that an entire Job has to be executed. It can be the same Job, but + * just make sure that you don't get an endless loop. Provide an escape routine using JobEval. + * + * @author Matt + * @since 01-10-2003, Rewritten on 18-06-2004 + * + */ +public class JobEntryJob extends JobEntryBase implements Cloneable, JobEntryInterface { + private static Class PKG = JobEntryJob.class; // for i18n purposes, needed by Translator2!! 
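+  // Where the target job lives (filename, repository name/directory, or object id) plus execution, parameter and logging options.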
+ + private String filename; + private String jobname; + private String directory; + private ObjectId jobObjectId; + private ObjectLocationSpecificationMethod specificationMethod; + + public String[] arguments; + public boolean argFromPrevious; + public boolean paramsFromPrevious; + public boolean execPerRow; + + public String[] parameters; + public String[] parameterFieldNames; + public String[] parameterValues; + + public boolean setLogfile; + public String logfile, logext; + public boolean addDate, addTime; + public LogLevel logFileLevel; + + public boolean parallel; + private String directoryPath; + public boolean setAppendLogfile; + public boolean createParentFolder; + + public boolean waitingToFinish = true; + public boolean followingAbortRemotely; + + public boolean expandingRemoteJob; + + private String remoteSlaveServerName; + public boolean passingAllParameters = true; + + private boolean passingExport; + + public static final LogLevel DEFAULT_LOG_LEVEL = LogLevel.NOTHING; + + private Job job; + + public JobEntryJob( String name ) { + super( name, "" ); + } + + public JobEntryJob() { + this( "" ); + clear(); + } + + private void allocateArgs( int nrArgs ) { + arguments = new String[nrArgs]; + } + + private void allocateParams( int nrParameters ) { + parameters = new String[nrParameters]; + parameterFieldNames = new String[nrParameters]; + parameterValues = new String[nrParameters]; + } + + public Object clone() { + JobEntryJob je = (JobEntryJob) super.clone(); + if ( arguments != null ) { + int nrArgs = arguments.length; + je.allocateArgs( nrArgs ); + System.arraycopy( arguments, 0, je.arguments, 0, nrArgs ); + } + if ( parameters != null ) { + int nrParameters = parameters.length; + je.allocateParams( nrParameters ); + System.arraycopy( parameters, 0, je.parameters, 0, nrParameters ); + System.arraycopy( parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters ); + System.arraycopy( parameterValues, 0, je.parameterValues, 0, nrParameters ); + } + return je; + } + + public void setFileName( String n ) { + filename = n; + } + + /** + * @deprecated use getFilename() instead. + * @return the filename + */ + @Deprecated + public String getFileName() { + return filename; + } + + public String getFilename() { + return filename; + } + + public String getRealFilename() { + return environmentSubstitute( getFilename() ); + } + + public void setJobName( String jobname ) { + this.jobname = jobname; + } + + public String getJobName() { + return jobname; + } + + public String getDirectory() { + return directory; + } + + public void setDirectory( String directory ) { + this.directory = directory; + } + + public boolean isPassingExport() { + return passingExport; + } + + public void setPassingExport( boolean passingExport ) { + this.passingExport = passingExport; + } + + public String getLogFilename() { + String retval = ""; + if ( setLogfile ) { + retval += logfile == null ? "" : logfile; + Calendar cal = Calendar.getInstance(); + if ( addDate ) { + SimpleDateFormat sdf = new SimpleDateFormat( "yyyyMMdd" ); + retval += "_" + sdf.format( cal.getTime() ); + } + if ( addTime ) { + SimpleDateFormat sdf = new SimpleDateFormat( "HHmmss" ); + retval += "_" + sdf.format( cal.getTime() ); + } + if ( logext != null && logext.length() > 0 ) { + retval += "." 
+ logext; + } + } + return retval; + } + + public String getXML() { + StringBuffer retval = new StringBuffer( 200 ); + + retval.append( super.getXML() ); + + // specificationMethod + // + retval.append( " " ).append( + XMLHandler.addTagValue( "specification_method", specificationMethod == null ? null : specificationMethod + .getCode() ) ); + retval.append( " " ).append( + XMLHandler.addTagValue( "job_object_id", jobObjectId == null ? null : jobObjectId.toString() ) ); + // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same + // repository. + // + if ( rep != null && jobObjectId != null ) { + try { + RepositoryObject objectInformation = rep.getObjectInformation( jobObjectId, RepositoryObjectType.JOB ); + if ( objectInformation != null ) { + jobname = objectInformation.getName(); + directory = objectInformation.getRepositoryDirectory().getPath(); + } + } catch ( KettleException e ) { + // Ignore object reference problems. It simply means that the reference is no longer valid. + } + } + retval.append( " " ).append( XMLHandler.addTagValue( "filename", filename ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "jobname", jobname ) ); + + if ( directory != null ) { + retval.append( " " ).append( XMLHandler.addTagValue( "directory", directory ) ); + } else if ( directoryPath != null ) { + retval.append( " " ).append( XMLHandler.addTagValue( "directory", directoryPath ) ); + } + retval.append( " " ).append( XMLHandler.addTagValue( "arg_from_previous", argFromPrevious ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "params_from_previous", paramsFromPrevious ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "exec_per_row", execPerRow ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "set_logfile", setLogfile ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "logfile", logfile ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "logext", logext ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "add_date", addDate ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "add_time", addTime ) ); + retval.append( " " ).append( + XMLHandler.addTagValue( "loglevel", logFileLevel != null ? logFileLevel.getCode() : DEFAULT_LOG_LEVEL + .getCode() ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "slave_server_name", remoteSlaveServerName ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "wait_until_finished", waitingToFinish ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "follow_abort_remote", followingAbortRemotely ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "expand_remote_job", expandingRemoteJob ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "create_parent_folder", createParentFolder ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "pass_export", passingExport ) ); + + if ( arguments != null ) { + for ( int i = 0; i < arguments.length; i++ ) { + // This is a very very bad way of making an XML file, don't use it (or + // copy it). Sven Boden + retval.append( " " ).append( XMLHandler.addTagValue( "argument" + i, arguments[i] ) ); + } + } + + if ( parameters != null ) { + retval.append( " " ).append( XMLHandler.openTag( "parameters" ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "pass_all_parameters", passingAllParameters ) ); + + for ( int i = 0; i < parameters.length; i++ ) { + // This is a better way of making the XML file than the arguments. 
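+ // Each parameter becomes its own <parameter> element carrying the name, the optional
+ // stream (field) name and the static value; at run time a non-empty stream name takes
+ // precedence and execute() reads the value from the incoming result row instead.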
+ retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[i] ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "stream_name", parameterFieldNames[i] ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "value", parameterValues[i] ) ); + + retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ); + } + retval.append( " " ).append( XMLHandler.closeTag( "parameters" ) ); + } + retval.append( " " ).append( XMLHandler.addTagValue( "set_append_logfile", setAppendLogfile ) ); + + return retval.toString(); + } + + private void checkObjectLocationSpecificationMethod() { + if ( specificationMethod == null ) { + // Backward compatibility + // + // Default = Filename + // + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + + if ( !Const.isEmpty( filename ) ) { + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + } else if ( jobObjectId != null ) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } else if ( !Const.isEmpty( jobname ) ) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } + } + } + + public void loadXML( Node entrynode, List databases, List slaveServers, + Repository rep, IMetaStore metaStore ) throws KettleXMLException { + try { + super.loadXML( entrynode, databases, slaveServers ); + + String method = XMLHandler.getTagValue( entrynode, "specification_method" ); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); + + String jobId = XMLHandler.getTagValue( entrynode, "job_object_id" ); + jobObjectId = Const.isEmpty( jobId ) ? null : new StringObjectId( jobId ); + filename = XMLHandler.getTagValue( entrynode, "filename" ); + jobname = XMLHandler.getTagValue( entrynode, "jobname" ); + + if ( rep != null && rep.isConnected() && !Const.isEmpty( jobname ) ) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } + + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "arg_from_previous" ) ); + paramsFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "params_from_previous" ) ); + execPerRow = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "exec_per_row" ) ); + setLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_logfile" ) ); + addDate = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_date" ) ); + addTime = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_time" ) ); + logfile = XMLHandler.getTagValue( entrynode, "logfile" ); + logext = XMLHandler.getTagValue( entrynode, "logext" ); + logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) ); + setAppendLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_append_logfile" ) ); + remoteSlaveServerName = XMLHandler.getTagValue( entrynode, "slave_server_name" ); + passingExport = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "pass_export" ) ); + directory = XMLHandler.getTagValue( entrynode, "directory" ); + createParentFolder = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "create_parent_folder" ) ); + + String wait = XMLHandler.getTagValue( entrynode, "wait_until_finished" ); + if ( Const.isEmpty( wait ) ) { + waitingToFinish = true; + } else { + waitingToFinish = 
"Y".equalsIgnoreCase( wait ); + } + + followingAbortRemotely = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "follow_abort_remote" ) ); + expandingRemoteJob = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "expand_remote_job" ) ); + + // How many arguments? + int argnr = 0; + while ( XMLHandler.getTagValue( entrynode, "argument" + argnr ) != null ) { + argnr++; + } + allocateArgs( argnr ); + + // Read them all... This is a very BAD way to do it by the way. Sven + // Boden. + for ( int a = 0; a < argnr; a++ ) { + arguments[a] = XMLHandler.getTagValue( entrynode, "argument" + a ); + } + + Node parametersNode = XMLHandler.getSubNode( entrynode, "parameters" ); + + String passAll = XMLHandler.getTagValue( parametersNode, "pass_all_parameters" ); + passingAllParameters = Const.isEmpty( passAll ) || "Y".equalsIgnoreCase( passAll ); + + int nrParameters = XMLHandler.countNodes( parametersNode, "parameter" ); + allocateParams( nrParameters ); + + for ( int i = 0; i < nrParameters; i++ ) { + Node knode = XMLHandler.getSubNodeByNr( parametersNode, "parameter", i ); + + parameters[i] = XMLHandler.getTagValue( knode, "name" ); + parameterFieldNames[i] = XMLHandler.getTagValue( knode, "stream_name" ); + parameterValues[i] = XMLHandler.getTagValue( knode, "value" ); + } + } catch ( KettleXMLException xe ) { + throw new KettleXMLException( "Unable to load 'job' job entry from XML node", xe ); + } + } + + /** + * Load the jobentry from repository + */ + public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List databases, + List slaveServers ) throws KettleException { + try { + String method = rep.getJobEntryAttributeString( id_jobentry, "specification_method" ); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); + String jobId = rep.getJobEntryAttributeString( id_jobentry, "job_object_id" ); + jobObjectId = Const.isEmpty( jobId ) ? 
null : new StringObjectId( jobId ); + jobname = rep.getJobEntryAttributeString( id_jobentry, "name" ); + directory = rep.getJobEntryAttributeString( id_jobentry, "dir_path" ); + filename = rep.getJobEntryAttributeString( id_jobentry, "file_name" ); + + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" ); + paramsFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "params_from_previous" ); + execPerRow = rep.getJobEntryAttributeBoolean( id_jobentry, "exec_per_row" ); + setLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_logfile" ); + addDate = rep.getJobEntryAttributeBoolean( id_jobentry, "add_date" ); + addTime = rep.getJobEntryAttributeBoolean( id_jobentry, "add_time" ); + logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" ); + logext = rep.getJobEntryAttributeString( id_jobentry, "logext" ); + logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) ); + setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" ); + remoteSlaveServerName = rep.getJobEntryAttributeString( id_jobentry, "slave_server_name" ); + passingExport = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_export" ); + waitingToFinish = rep.getJobEntryAttributeBoolean( id_jobentry, "wait_until_finished", true ); + followingAbortRemotely = rep.getJobEntryAttributeBoolean( id_jobentry, "follow_abort_remote" ); + expandingRemoteJob = rep.getJobEntryAttributeBoolean( id_jobentry, "expand_remote_job" ); + createParentFolder = rep.getJobEntryAttributeBoolean( id_jobentry, "create_parent_folder" ); + + // How many arguments? + int argnr = rep.countNrJobEntryAttributes( id_jobentry, "argument" ); + allocateArgs( argnr ); + + // Read all arguments ... + for ( int a = 0; a < argnr; a++ ) { + arguments[a] = rep.getJobEntryAttributeString( id_jobentry, a, "argument" ); + } + + // How many arguments? + int parameternr = rep.countNrJobEntryAttributes( id_jobentry, "parameter_name" ); + allocateParams( parameternr ); + + // Read all parameters ... + for ( int a = 0; a < parameternr; a++ ) { + parameters[a] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_name" ); + parameterFieldNames[a] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_stream_name" ); + parameterValues[a] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_value" ); + } + + passingAllParameters = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_all_parameters", true ); + + } catch ( KettleDatabaseException dbe ) { + throw new KettleException( "Unable to load job entry of type 'job' from the repository with id_jobentry=" + + id_jobentry, dbe ); + } + } + + // Save the attributes of this job entry + // + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException { + try { + rep.saveJobEntryAttribute( id_job, getObjectId(), "specification_method", specificationMethod == null + ? null : specificationMethod.getCode() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "job_object_id", jobObjectId == null ? null : jobObjectId + .toString() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "name", getJobName() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "dir_path", getDirectory() != null ? 
getDirectory() : "" ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "file_name", filename ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "arg_from_previous", argFromPrevious ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "params_from_previous", paramsFromPrevious ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "exec_per_row", execPerRow ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "set_logfile", setLogfile ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "add_date", addDate ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "add_time", addTime ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "logfile", logfile ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "logext", logext ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "set_append_logfile", setAppendLogfile ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "loglevel", logFileLevel != null + ? logFileLevel.getCode() : JobEntryJob.DEFAULT_LOG_LEVEL.getCode() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "slave_server_name", remoteSlaveServerName ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_export", passingExport ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "wait_until_finished", waitingToFinish ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "expand_remote_job", expandingRemoteJob ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "create_parent_folder", createParentFolder ); + + // save the arguments... + if ( arguments != null ) { + for ( int i = 0; i < arguments.length; i++ ) { + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "argument", arguments[i] ); + } + } + + // save the parameters... + if ( parameters != null ) { + for ( int i = 0; i < parameters.length; i++ ) { + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_name", parameters[i] ); + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_stream_name", Const.NVL( + parameterFieldNames[i], "" ) ); + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_value", Const.NVL( + parameterValues[i], "" ) ); + } + } + + rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_all_parameters", passingAllParameters ); + } catch ( KettleDatabaseException dbe ) { + throw new KettleException( + "Unable to save job entry of type job to the repository with id_job=" + id_job, dbe ); + } + } + + public Result execute( Result result, int nr ) throws KettleException { + result.setEntryNr( nr ); + + LogChannelFileWriter logChannelFileWriter = null; + + LogLevel jobLogLevel = parentJob.getLogLevel(); + if ( setLogfile ) { + String realLogFilename = environmentSubstitute( getLogFilename() ); + // We need to check here the log filename + // if we do not have one, we must fail + if ( Const.isEmpty( realLogFilename ) ) { + logError( BaseMessages.getString( PKG, "JobJob.Exception.LogFilenameMissing" ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + + // create parent folder? 
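+ // createParentFolder() below checks the parent directory of the log file and, when the
+ // "create parent folder" option is enabled, tries to create it; if that fails the entry
+ // is marked as failed and returns immediately.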
+ if ( !createParentFolder( realLogFilename ) ) { + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + try { + logChannelFileWriter = + new LogChannelFileWriter( + this.getLogChannelId(), KettleVFS.getFileObject( realLogFilename ), setAppendLogfile ); + logChannelFileWriter.startLogging(); + } catch ( KettleException e ) { + logError( "Unable to open file appender for file [" + getLogFilename() + "] : " + e.toString() ); + logError( Const.getStackTracker( e ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + jobLogLevel = logFileLevel; + } + + // Figure out the remote slave server... + // + SlaveServer remoteSlaveServer = null; + if ( !Const.isEmpty( remoteSlaveServerName ) ) { + String realRemoteSlaveServerName = environmentSubstitute( remoteSlaveServerName ); + remoteSlaveServer = parentJob.getJobMeta().findSlaveServer( realRemoteSlaveServerName ); + if ( remoteSlaveServer == null ) { + throw new KettleException( BaseMessages.getString( + PKG, "JobJob.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName ) ); + } + } + try { + // First load the job, outside of the loop... + if ( parentJob.getJobMeta() != null ) { + // reset the internal variables again. + // Maybe we should split up the variables even more like in UNIX shells. + // The internal variables need to be reset to be able use them properly + // in 2 sequential sub jobs. + parentJob.getJobMeta().setInternalKettleVariables(); + } + + // Explain what we are loading... + // + switch ( specificationMethod ) { + case REPOSITORY_BY_NAME: + if ( log.isDetailed() ) { + logDetailed( "Loading job from repository : [" + + directory + " : " + environmentSubstitute( jobname ) + "]" ); + } + break; + case FILENAME: + if ( log.isDetailed() ) { + logDetailed( "Loading job from XML file : [" + environmentSubstitute( filename ) + "]" ); + } + break; + case REPOSITORY_BY_REFERENCE: + if ( log.isDetailed() ) { + logDetailed( "Loading job from repository by reference : [" + jobObjectId + "]" ); + } + break; + default: + break; + } + + JobMeta jobMeta = getJobMeta( rep, this ); + + // Verify that we loaded something, complain if we did not... + // + if ( jobMeta == null ) { + throw new KettleException( + "Unable to load the job: please specify the name and repository directory OR a filename" ); + } + + verifyRecursiveExecution( parentJob, jobMeta ); + + int iteration = 0; + String[] args1 = arguments; + // no arguments? Check the parent jobs arguments + if ( args1 == null || args1.length == 0 ) { + args1 = parentJob.getArguments(); + } + + copyVariablesFrom( parentJob ); + setParentVariableSpace( parentJob ); + + // + // For the moment only do variable translation at the start of a job, not + // for every input row (if that would be switched on) + // + String[] args = null; + if ( args1 != null ) { + args = new String[args1.length]; + for ( int idx = 0; idx < args1.length; idx++ ) { + args[idx] = environmentSubstitute( args1[idx] ); + } + } + + RowMetaAndData resultRow = null; + boolean first = true; + List rows = new ArrayList( result.getRows() ); + + while ( ( first && !execPerRow ) + || ( execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0 ) ) { + first = false; + + // Clear the result rows of the result + // Otherwise we double the amount of rows every iteration in the simple cases. 
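+ // Note that the surrounding while-loop runs exactly once when "execute for every input
+ // row" is disabled, and otherwise once per row of the previous result, stopping early on
+ // the first error.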
+ // + if ( execPerRow ) { + result.getRows().clear(); + } + + if ( rows != null && execPerRow ) { + resultRow = rows.get( iteration ); + } else { + resultRow = null; + } + + NamedParams namedParam = new NamedParamsDefault(); + + // First (optionally) copy all the parameter values from the parent job + // + if ( paramsFromPrevious ) { + String[] parentParameters = parentJob.listParameters(); + for ( int idx = 0; idx < parentParameters.length; idx++ ) { + String par = parentParameters[idx]; + String def = parentJob.getParameterDefault( par ); + String val = parentJob.getParameterValue( par ); + String des = parentJob.getParameterDescription( par ); + + namedParam.addParameterDefinition( par, def, des ); + namedParam.setParameterValue( par, val ); + } + } + + // Now add those parameter values specified by the user in the job entry + // + if ( parameters != null ) { + for ( int idx = 0; idx < parameters.length; idx++ ) { + if ( !Const.isEmpty( parameters[idx] ) ) { + + // If it's not yet present in the parent job, add it... + // + if ( Const.indexOfString( parameters[idx], namedParam.listParameters() ) < 0 ) { + // We have a parameter + try { + namedParam.addParameterDefinition( parameters[idx], "", "Job entry runtime" ); + } catch ( DuplicateParamException e ) { + // Should never happen + // + logError( "Duplicate parameter definition for " + parameters[idx] ); + } + } + + if ( Const.isEmpty( Const.trim( parameterFieldNames[idx] ) ) ) { + namedParam.setParameterValue( parameters[idx], Const.NVL( + environmentSubstitute( parameterValues[idx] ), "" ) ); + } else { + // something filled in, in the field column... + // + String value = ""; + if ( resultRow != null ) { + value = resultRow.getString( parameterFieldNames[idx], "" ); + } + namedParam.setParameterValue( parameters[idx], value ); + } + } + } + } + + Result oneResult = new Result(); + + List sourceRows = null; + + if ( execPerRow ) { + // Execute for each input row + + if ( argFromPrevious ) { + // Copy the input row to the (command line) arguments + + args = null; + if ( resultRow != null ) { + args = new String[resultRow.size()]; + for ( int i = 0; i < resultRow.size(); i++ ) { + args[i] = resultRow.getString( i, null ); + } + } + } else { + // Just pass a single row + List newList = new ArrayList(); + newList.add( resultRow ); + sourceRows = newList; + } + + if ( paramsFromPrevious ) { // Copy the input the parameters + + if ( parameters != null ) { + for ( int idx = 0; idx < parameters.length; idx++ ) { + if ( !Const.isEmpty( parameters[idx] ) ) { + // We have a parameter + if ( Const.isEmpty( Const.trim( parameterFieldNames[idx] ) ) ) { + namedParam.setParameterValue( parameters[idx], Const.NVL( + environmentSubstitute( parameterValues[idx] ), "" ) ); + } else { + String fieldValue = ""; + + if ( resultRow != null ) { + fieldValue = resultRow.getString( parameterFieldNames[idx], "" ); + } + // Get the value from the input stream + namedParam.setParameterValue( parameters[idx], Const.NVL( fieldValue, "" ) ); + } + } + } + } + } + } else { + if ( argFromPrevious ) { + // Only put the first Row on the arguments + args = null; + if ( resultRow != null ) { + args = new String[resultRow.size()]; + for ( int i = 0; i < resultRow.size(); i++ ) { + args[i] = resultRow.getString( i, null ); + } + } + } else { + // Keep it as it was... 
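+ // i.e. hand the complete row set of the previous result to the sub-job unchanged.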
+ sourceRows = result.getRows(); + } + + if ( paramsFromPrevious ) { // Copy the input the parameters + + if ( parameters != null ) { + for ( int idx = 0; idx < parameters.length; idx++ ) { + if ( !Const.isEmpty( parameters[idx] ) ) { + // We have a parameter + if ( Const.isEmpty( Const.trim( parameterFieldNames[idx] ) ) ) { + namedParam.setParameterValue( parameters[idx], Const.NVL( + environmentSubstitute( parameterValues[idx] ), "" ) ); + } else { + String fieldValue = ""; + + if ( resultRow != null ) { + fieldValue = resultRow.getString( parameterFieldNames[idx], "" ); + } + // Get the value from the input stream + namedParam.setParameterValue( parameters[idx], Const.NVL( fieldValue, "" ) ); + } + } + } + } + } + } + + if ( remoteSlaveServer == null ) { + // Local execution... + // + + // Create a new job + // + job = new Job( rep, jobMeta, this ); + job.setParentJob( parentJob ); + job.setLogLevel( jobLogLevel ); + job.shareVariablesWith( this ); + job.setInternalKettleVariables( this ); + job.copyParametersFrom( jobMeta ); + job.setInteractive( parentJob.isInteractive() ); + if ( job.isInteractive() ) { + job.getJobEntryListeners().addAll( parentJob.getJobEntryListeners() ); + } + + // Pass the socket repository all around. + // + job.setSocketRepository( parentJob.getSocketRepository() ); + + // Set the parameters calculated above on this instance. + // + job.clearParameters(); + String[] parameterNames = job.listParameters(); + for ( int idx = 0; idx < parameterNames.length; idx++ ) { + // Grab the parameter value set in the job entry + // + String thisValue = namedParam.getParameterValue( parameterNames[idx] ); + if ( !Const.isEmpty( thisValue ) ) { + // Set the value as specified by the user in the job entry + // + job.setParameterValue( parameterNames[idx], thisValue ); + } else { + // See if the parameter had a value set in the parent job... + // This value should pass down to the sub-job if that's what we + // opted to do. + // + if ( isPassingAllParameters() ) { + String parentValue = parentJob.getParameterValue( parameterNames[idx] ); + if ( !Const.isEmpty( parentValue ) ) { + job.setParameterValue( parameterNames[idx], parentValue ); + } + } + } + } + job.activateParameters(); + + // Set the source rows we calculated above... + // + job.setSourceRows( sourceRows ); + + // Don't forget the logging... + job.beginProcessing(); + + // Link the job with the sub-job + parentJob.getJobTracker().addJobTracker( job.getJobTracker() ); + + // Link both ways! + job.getJobTracker().setParentJobTracker( parentJob.getJobTracker() ); + + if ( parentJob.getJobMeta().isBatchIdPassed() ) { + job.setPassedBatchId( parentJob.getBatchId() ); + } + + job.setArguments( args ); + + // Inform the parent job we started something here... 
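+ // Registered DelegationListeners (e.g. the Spoon UI) are notified so they can track the
+ // newly created sub-job.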
+ // + for ( DelegationListener delegationListener : parentJob.getDelegationListeners() ) { + // TODO: copy some settings in the job execution configuration, not strictly needed + // but the execution configuration information is useful in case of a job re-start + // + delegationListener.jobDelegationStarted( job, new JobExecutionConfiguration() ); + } + + JobEntryJobRunner runner = new JobEntryJobRunner( job, result, nr, log ); + Thread jobRunnerThread = new Thread( runner ); + // PDI-6518 + // added UUID to thread name, otherwise threads do share names if jobs entries are executed in parallel in a + // parent job + // if that happens, contained transformations start closing each other's connections + jobRunnerThread.setName( Const.NVL( job.getJobMeta().getName(), job.getJobMeta().getFilename() ) + + " UUID: " + UUID.randomUUID().toString() ); + jobRunnerThread.start(); + + // Keep running until we're done. + // + while ( !runner.isFinished() && !parentJob.isStopped() ) { + try { + Thread.sleep( 0, 1 ); + } catch ( InterruptedException e ) { + // Ignore + } + } + + // if the parent-job was stopped, stop the sub-job too... + if ( parentJob.isStopped() ) { + job.stopAll(); + runner.waitUntilFinished(); // Wait until finished! + } + + oneResult = runner.getResult(); + + } else { + + // Make sure we can parameterize the slave server connection + // + remoteSlaveServer.shareVariablesWith( this ); + + // Remote execution... + // + JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration(); + jobExecutionConfiguration.setPreviousResult( result.lightClone() ); // lightClone() because rows are + // overwritten in next line. + jobExecutionConfiguration.getPreviousResult().setRows( sourceRows ); + jobExecutionConfiguration.setArgumentStrings( args ); + jobExecutionConfiguration.setVariables( this ); + jobExecutionConfiguration.setRemoteServer( remoteSlaveServer ); + jobExecutionConfiguration.setRepository( rep ); + jobExecutionConfiguration.setLogLevel( jobLogLevel ); + jobExecutionConfiguration.setPassingExport( passingExport ); + jobExecutionConfiguration.setExpandingRemoteJob( expandingRemoteJob ); + for ( String param : namedParam.listParameters() ) { + String defValue = namedParam.getParameterDefault( param ); + String value = namedParam.getParameterValue( param ); + jobExecutionConfiguration.getParams().put( param, Const.NVL( value, defValue ) ); + } + if ( parentJob.getJobMeta().isBatchIdPassed() ) { + jobExecutionConfiguration.setPassedBatchId( parentJob.getBatchId() ); + } + + // Send the XML over to the slave server + // Also start the job over there... + // + String carteObjectId = null; + try { + carteObjectId = Job.sendToSlaveServer( jobMeta, jobExecutionConfiguration, rep, metaStore ); + } catch ( KettleException e ) { + // Perhaps the job exists on the remote server, carte is down, etc. + // This is an abort situation, stop the parent job... + // We want this in case we are running in parallel. The other job + // entries can stop running now. + // + parentJob.stopAll(); + + // Pass the exception along + // + throw e; + } + + // Now start the monitoring... + // + SlaveServerJobStatus jobStatus = null; + while ( !parentJob.isStopped() && waitingToFinish ) { + try { + jobStatus = remoteSlaveServer.getJobStatus( jobMeta.getName(), carteObjectId, 0 ); + if ( jobStatus.getResult() != null ) { + // The job is finished, get the result... 
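+ // (the parent polls the slave server about once a second, see the sleep below, until the
+ // remote job reports a result or the parent job is stopped)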
+ // + oneResult = jobStatus.getResult(); + break; + } + } catch ( Exception e1 ) { + logError( "Unable to contact slave server [" + + remoteSlaveServer + "] to verify the status of job [" + jobMeta.getName() + "]", e1 ); + oneResult.setNrErrors( 1L ); + break; // Stop looking too, chances are too low the server will + // come back on-line + } + + // sleep for 1 second + try { + Thread.sleep( 1000 ); + } catch ( InterruptedException e ) { + // Ignore + } + } + + if ( !waitingToFinish ) { + // Since the job was posted successfully, the result is true... + // + oneResult = new Result(); + oneResult.setResult( true ); + } + + if ( parentJob.isStopped() ) { + try { + // See if we have a status and if we need to stop the remote + // execution here... + // + if ( jobStatus == null || jobStatus.isRunning() ) { + // Try a remote abort ... + // + remoteSlaveServer.stopJob( jobMeta.getName(), carteObjectId ); + } + } catch ( Exception e1 ) { + logError( "Unable to contact slave server [" + + remoteSlaveServer + "] to stop job [" + jobMeta.getName() + "]", e1 ); + oneResult.setNrErrors( 1L ); + break; // Stop looking too, chances are too low the server will + // come back on-line + } + } + + } + + result.clear(); // clear only the numbers, NOT the files or rows. + result.add( oneResult ); + + // Set the result rows too, if any ... + if ( !Const.isEmpty( oneResult.getRows() ) ) { + result.setRows( new ArrayList( oneResult.getRows() ) ); + } + + // if one of them fails (in the loop), increase the number of errors + // + if ( oneResult.getResult() == false ) { + result.setNrErrors( result.getNrErrors() + 1 ); + } + + iteration++; + } + + } catch ( KettleException ke ) { + logError( "Error running job entry 'job' : ", ke ); + + result.setResult( false ); + result.setNrErrors( 1L ); + } + + if ( setLogfile ) { + if ( logChannelFileWriter != null ) { + logChannelFileWriter.stopLogging(); + + ResultFile resultFile = + new ResultFile( + ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName() ); + result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); + + // See if anything went wrong during file writing... 
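+ // Any exception the LogChannelFileWriter captured while writing the log file is reported
+ // here and turns the overall result of this job entry into a failure.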
+ // + if ( logChannelFileWriter.getException() != null ) { + logError( "Unable to open log file [" + getLogFilename() + "] : " ); + logError( Const.getStackTracker( logChannelFileWriter.getException() ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + } + } + + if ( result.getNrErrors() > 0 ) { + result.setResult( false ); + } else { + result.setResult( true ); + } + + return result; + } + + private boolean createParentFolder( String filename ) { + // Check for parent folder + FileObject parentfolder = null; + boolean resultat = true; + try { + // Get parent folder + parentfolder = KettleVFS.getFileObject( filename, this ).getParent(); + if ( !parentfolder.exists() ) { + if ( createParentFolder ) { + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderNotExist", parentfolder + .getName().toString() ) ); + } + parentfolder.createFolder(); + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderCreated", parentfolder + .getName().toString() ) ); + } + } else { + log.logError( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderNotExist", parentfolder + .getName().toString() ) ); + resultat = false; + } + } else { + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderExists", parentfolder + .getName().toString() ) ); + } + } + } catch ( Exception e ) { + resultat = false; + log.logError( BaseMessages.getString( PKG, "JobJob.Error.ChekingParentLogFolderTitle" ), BaseMessages + .getString( PKG, "JobJob.Error.ChekingParentLogFolder", parentfolder.getName().toString() ), e ); + } finally { + if ( parentfolder != null ) { + try { + parentfolder.close(); + parentfolder = null; + } catch ( Exception ex ) { + // Ignore + } + } + } + + return resultat; + } + + /** + * Make sure that we are not loading jobs recursively... + * + * @param parentJobMeta + * the parent job metadata + * @param jobMeta + * the job metadata + * @throws KettleException + * in case both jobs are loaded from the same source + */ + private void verifyRecursiveExecution( Job parentJob, JobMeta jobMeta ) throws KettleException { + + if ( parentJob == null ) { + return; // OK! + } + + JobMeta parentJobMeta = parentJob.getJobMeta(); + + if ( parentJobMeta.getName() == null && jobMeta.getName() != null ) { + return; // OK + } + if ( parentJobMeta.getName() != null && jobMeta.getName() == null ) { + return; // OK as well. + } + + // Not from the repository? just verify the filename + // + if ( jobMeta.getFilename() != null && jobMeta.getFilename().equals( parentJobMeta.getFilename() ) ) { + throw new KettleException( BaseMessages.getString( PKG, "JobJobError.Recursive", jobMeta.getFilename() ) ); + } + + // Different directories: OK + if ( parentJobMeta.getRepositoryDirectory() == null && jobMeta.getRepositoryDirectory() != null ) { + return; + } + if ( parentJobMeta.getRepositoryDirectory() != null && jobMeta.getRepositoryDirectory() == null ) { + return; + } + if ( jobMeta.getRepositoryDirectory().getObjectId() != parentJobMeta.getRepositoryDirectory().getObjectId() ) { + return; + } + + // Same names, same directories : loaded from same location in the + // repository: + // --> recursive loading taking place! 
+ // + if ( parentJobMeta.getName().equals( jobMeta.getName() ) ) { + throw new KettleException( BaseMessages.getString( PKG, "JobJobError.Recursive", jobMeta.getFilename() ) ); + } + + // Also compare with the grand-parent (if there is any) + verifyRecursiveExecution( parentJob.getParentJob(), jobMeta ); + } + + public void clear() { + super.clear(); + + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + jobname = null; + filename = null; + directory = null; + arguments = null; + argFromPrevious = false; + addDate = false; + addTime = false; + logfile = null; + logext = null; + setLogfile = false; + setAppendLogfile = false; + } + + public boolean evaluates() { + return true; + } + + public boolean isUnconditional() { + return true; + } + + public List getSQLStatements( Repository repository, IMetaStore metaStore, VariableSpace space ) throws KettleException { + this.copyVariablesFrom( space ); + JobMeta jobMeta = getJobMeta( repository, metaStore, space ); + return jobMeta.getSQLStatements( repository, null ); + } + + @Deprecated + public JobMeta getJobMeta( Repository rep, VariableSpace space ) throws KettleException { + if ( rep != null ) { + return getJobMeta( rep, rep.getMetaStore(), space ); + } else { + return getJobMeta( rep, getMetaStore(), space ); + } + } + + public JobMeta getJobMeta( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { + JobMeta jobMeta = null; + try { + CurrentDirectoryResolver r = new CurrentDirectoryResolver(); + VariableSpace tmpSpace = r.resolveCurrentDirectory( + specificationMethod, space, rep, parentJob, getFilename() ); + switch ( specificationMethod ) { + case FILENAME: + String realFilename = tmpSpace.environmentSubstitute( getFilename() ); + if ( rep != null ) { + // need to try to load from the repository + realFilename = r.normalizeSlashes( realFilename ); + try { + String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); + String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1 ); + RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); + jobMeta = rep.loadJob( tmpFilename, dir, null, null ); + } catch ( KettleException ke ) { + // try without extension + if ( realFilename.endsWith( Const.STRING_JOB_DEFAULT_EXT ) ) { + try { + String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1, + realFilename.indexOf( "." 
+ Const.STRING_JOB_DEFAULT_EXT ) ); + String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); + RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); + jobMeta = rep.loadJob( tmpFilename, dir, null, null ); + } catch ( KettleException ke2 ) { + // fall back to try loading from file system (mappingJobMeta is going to be null) + } + } + } + } + if ( jobMeta == null ) { + jobMeta = new JobMeta( tmpSpace, realFilename, rep, metaStore, null ); + } + break; + case REPOSITORY_BY_NAME: + String realDirectory = tmpSpace.environmentSubstitute( getDirectory() ); + String realJobName = tmpSpace.environmentSubstitute( getJobName() ); + + if ( rep != null ) { + realDirectory = r.normalizeSlashes( realDirectory ); + RepositoryDirectoryInterface repositoryDirectory = + rep.loadRepositoryDirectoryTree().findDirectory( realDirectory ); + if ( repositoryDirectory == null ) { + throw new KettleException( "Unable to find repository directory [" + + Const.NVL( realDirectory, "" ) + "]" ); + } + jobMeta = rep.loadJob( realJobName, repositoryDirectory, null, null ); // reads + } else { + // rep is null, let's try loading by filename + try { + jobMeta = new JobMeta( tmpSpace, realDirectory + "/" + realJobName, rep, metaStore, null ); + } catch ( KettleException ke ) { + try { + // add .kjb extension and try again + jobMeta = new JobMeta( tmpSpace, + realDirectory + "/" + realJobName + "." + Const.STRING_JOB_DEFAULT_EXT, rep, metaStore, null ); + } catch ( KettleException ke2 ) { + ke2.printStackTrace(); + throw new KettleException( + "Could not execute job specified in a repository since we're not connected to one" ); + } + } + } + break; + case REPOSITORY_BY_REFERENCE: + if ( rep != null ) { + // Load the last version... + // + jobMeta = rep.loadJob( jobObjectId, null ); + break; + } else { + throw new KettleException( + "Could not execute job specified in a repository since we're not connected to one" ); + } + default: + throw new KettleException( "The specified object location specification method '" + + specificationMethod + "' is not yet supported in this job entry." ); + } + + if ( jobMeta != null ) { + jobMeta.setRepository( rep ); + jobMeta.setMetaStore( metaStore ); + } + + return jobMeta; + } catch ( Exception e ) { + throw new KettleException( "Unexpected error during job metadata load", e ); + } + + } + + /** + * @return Returns the runEveryResultRow. + */ + public boolean isExecPerRow() { + return execPerRow; + } + + /** + * @param runEveryResultRow + * The runEveryResultRow to set. + */ + public void setExecPerRow( boolean runEveryResultRow ) { + this.execPerRow = runEveryResultRow; + } + + public List getResourceDependencies( JobMeta jobMeta ) { + List references = super.getResourceDependencies( jobMeta ); + if ( !Const.isEmpty( filename ) ) { + String realFileName = jobMeta.environmentSubstitute( filename ); + ResourceReference reference = new ResourceReference( this ); + reference.getEntries().add( new ResourceEntry( realFileName, ResourceType.ACTIONFILE ) ); + references.add( reference ); + } + return references; + } + + /** + * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied + * resource naming interface allows the object to name appropriately without worrying about those parts of the + * implementation specific details. + * + * @param space + * The variable space to resolve (environment) variables with. 
+ * @param definitions + * The map containing the filenames and content + * @param namingInterface + * The resource naming interface allows the object to be named appropriately + * @param repository + * The repository to load resources from + * @param metaStore + * the metaStore to load external metadata from + * + * @return The filename for this object. (also contained in the definitions map) + * @throws KettleException + * in case something goes wrong during the export + */ + public String exportResources( VariableSpace space, Map definitions, + ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { + // Try to load the transformation from repository or file. + // Modify this recursively too... + // + // AGAIN: there is no need to clone this job entry because the caller is + // responsible for this. + // + // First load the job meta data... + // + copyVariablesFrom( space ); // To make sure variables are available. + JobMeta jobMeta = getJobMeta( repository, metaStore, space ); + + // Also go down into the job and export the files there. (going down + // recursively) + // + String proposedNewFilename = + jobMeta.exportResources( jobMeta, definitions, namingInterface, repository, metaStore ); + + // To get a relative path to it, we inject + // ${Internal.Job.Filename.Directory} + // + String newFilename = "${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}/" + proposedNewFilename; + + // Set the filename in the job + // + jobMeta.setFilename( newFilename ); + + // exports always reside in the root directory, in case we want to turn this + // into a file repository... + // + jobMeta.setRepositoryDirectory( new RepositoryDirectory() ); + + // export to filename ALWAYS (this allows the exported XML to be executed remotely) + // + setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); + + // change it in the job entry + // + filename = newFilename; + + return proposedNewFilename; + } + + @Override + public void check( List remarks, JobMeta jobMeta, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + if ( setLogfile ) { + JobEntryValidatorUtils.andValidator().validate( this, "logfile", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + } + + if ( null != directory ) { + // if from repo + JobEntryValidatorUtils.andValidator().validate( this, "directory", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notNullValidator() ) ); + JobEntryValidatorUtils.andValidator().validate( this, "jobName", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + } else { + // else from xml file + JobEntryValidatorUtils.andValidator().validate( this, "filename", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + } + } + + public static void main( String[] args ) { + List remarks = new ArrayList(); + new JobEntryJob().check( remarks, null, new Variables(), null, null ); + System.out.printf( "Remarks: %s\n", remarks ); + } + + protected String getLogfile() { + return logfile; + } + + /** + * @return the remote slave server name + */ + public String getRemoteSlaveServerName() { + return remoteSlaveServerName; + } + + /** + * @param remoteSlaveServerName + * the remoteSlaveServer to set + */ + public void setRemoteSlaveServerName( String remoteSlaveServerName ) { + this.remoteSlaveServerName = remoteSlaveServerName; + } + + /** + * @return the waitingToFinish + */ + public boolean isWaitingToFinish() { + 
return waitingToFinish; + } + + /** + * @param waitingToFinish + * the waitingToFinish to set + */ + public void setWaitingToFinish( boolean waitingToFinish ) { + this.waitingToFinish = waitingToFinish; + } + + /** + * @return the followingAbortRemotely + */ + public boolean isFollowingAbortRemotely() { + return followingAbortRemotely; + } + + /** + * @param followingAbortRemotely + * the followingAbortRemotely to set + */ + public void setFollowingAbortRemotely( boolean followingAbortRemotely ) { + this.followingAbortRemotely = followingAbortRemotely; + } + + /** + * @return the passingAllParameters + */ + public boolean isPassingAllParameters() { + return passingAllParameters; + } + + /** + * @param passingAllParameters + * the passingAllParameters to set + */ + public void setPassingAllParameters( boolean passingAllParameters ) { + this.passingAllParameters = passingAllParameters; + } + + public Job getJob() { + return job; + } + + /** + * @return the jobObjectId + */ + public ObjectId getJobObjectId() { + return jobObjectId; + } + + /** + * @param jobObjectId + * the jobObjectId to set + */ + public void setJobObjectId( ObjectId jobObjectId ) { + this.jobObjectId = jobObjectId; + } + + /** + * @return the specificationMethod + */ + public ObjectLocationSpecificationMethod getSpecificationMethod() { + return specificationMethod; + } + + /** + * @param specificationMethod + * the specificationMethod to set + */ + public void setSpecificationMethod( ObjectLocationSpecificationMethod specificationMethod ) { + this.specificationMethod = specificationMethod; + } + + @Override + public boolean hasRepositoryReferences() { + return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } + + /** + * Look up the references after import + * + * @param repository + * the repository to reference. + */ + public void lookupRepositoryReferences( Repository repository ) throws KettleException { + // The correct reference is stored in the job name and directory attributes... + // + RepositoryDirectoryInterface repositoryDirectoryInterface = + RepositoryImportLocation.getRepositoryImportLocation().findDirectory( directory ); + jobObjectId = repository.getJobId( jobname, repositoryDirectoryInterface ); + } + + private boolean isJobDefined() { + return !Const.isEmpty( filename ) + || jobObjectId != null || ( !Const.isEmpty( this.directory ) && !Const.isEmpty( jobname ) ); + } + + public boolean[] isReferencedObjectEnabled() { + return new boolean[] { isJobDefined(), }; + } + + /** + * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... 
+ */ + public String[] getReferencedObjectDescriptions() { + return new String[] { BaseMessages.getString( PKG, "JobEntryJob.ReferencedObject.Description" ), }; + } + + /** + * Load the referenced object + * + * @param index + * the referenced object index to load (in case there are multiple references) + * @param rep + * the repository + * @param metaStore + * the metaStore + * @param space + * the variable space to use + * @return the referenced object once loaded + * @throws KettleException + */ + public Object loadReferencedObject( int index, Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { + return getJobMeta( rep, metaStore, space ); + } + + public boolean isExpandingRemoteJob() { + return expandingRemoteJob; + } + + public void setExpandingRemoteJob( boolean expandingRemoteJob ) { + this.expandingRemoteJob = expandingRemoteJob; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java new file mode 100644 index 0000000..ac26b6f --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java @@ -0,0 +1,1563 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.job.entries.trans; + +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Map; + +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.ObjectLocationSpecificationMethod; +import org.pentaho.di.core.Result; +import org.pentaho.di.core.ResultFile; +import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.logging.LogChannelFileWriter; +import org.pentaho.di.core.logging.LogLevel; +import org.pentaho.di.core.parameters.NamedParams; +import org.pentaho.di.core.parameters.NamedParamsDefault; +import org.pentaho.di.core.util.CurrentDirectoryResolver; +import org.pentaho.di.core.util.FileUtil; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.job.DelegationListener; +import org.pentaho.di.job.Job; +import org.pentaho.di.job.JobMeta; +import org.pentaho.di.job.entry.JobEntryBase; +import org.pentaho.di.job.entry.JobEntryInterface; +import org.pentaho.di.job.entry.validator.AndValidator; +import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.repository.RepositoryDirectory; +import org.pentaho.di.repository.RepositoryDirectoryInterface; +import org.pentaho.di.repository.RepositoryImportLocation; +import org.pentaho.di.repository.RepositoryObject; +import org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.repository.StringObjectId; +import org.pentaho.di.resource.ResourceDefinition; +import org.pentaho.di.resource.ResourceEntry; +import org.pentaho.di.resource.ResourceEntry.ResourceType; +import org.pentaho.di.resource.ResourceNamingInterface; +import org.pentaho.di.resource.ResourceReference; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransExecutionConfiguration; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.cluster.TransSplitter; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.www.SlaveServerTransStatus; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/** + * This is the job entry that defines a transformation to be run. + * + * @author Matt Casters + * @since 1-Oct-2003, rewritten on 18-June-2004 + */ +public class JobEntryTrans extends JobEntryBase implements Cloneable, JobEntryInterface { + private static Class PKG = JobEntryTrans.class; // for i18n purposes, needed by Translator2!! 
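+
+ // As in JobEntryJob, the transformation to run can be referenced by filename, by repository
+ // directory + name, or by repository object id (specificationMethod); the clustering,
+ // remote slave server and logging options below control where and how it is executed.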
+ + private String transname; + + private String filename; + + private String directory; + + private ObjectId transObjectId; + + private ObjectLocationSpecificationMethod specificationMethod; + + public String[] arguments; + + public boolean argFromPrevious; + + public boolean paramsFromPrevious; + + public boolean execPerRow; + + public String[] parameters; + + public String[] parameterFieldNames; + + public String[] parameterValues; + + public boolean clearResultRows; + + public boolean clearResultFiles; + + public boolean createParentFolder; + + public boolean setLogfile; + + public boolean setAppendLogfile; + + public String logfile, logext; + + public boolean addDate, addTime; + + public LogLevel logFileLevel; + + private String directoryPath; + + private boolean clustering; + + public boolean waitingToFinish = true; + + public boolean followingAbortRemotely; + + private String remoteSlaveServerName; + + private boolean passingAllParameters = true; + + private boolean loggingRemoteWork; + + private Trans trans; + + public JobEntryTrans( String name ) { + super( name, "" ); + } + + public JobEntryTrans() { + this( "" ); + clear(); + } + + private void allocateArgs( int nrArgs ) { + arguments = new String[nrArgs]; + } + + private void allocateParams( int nrParameters ) { + parameters = new String[nrParameters]; + parameterFieldNames = new String[nrParameters]; + parameterValues = new String[nrParameters]; + } + + public Object clone() { + JobEntryTrans je = (JobEntryTrans) super.clone(); + if ( arguments != null ) { + int nrArgs = arguments.length; + je.allocateArgs( nrArgs ); + System.arraycopy( arguments, 0, je.arguments, 0, nrArgs ); + } + if ( parameters != null ) { + int nrParameters = parameters.length; + je.allocateParams( nrParameters ); + System.arraycopy( parameters, 0, je.parameters, 0, nrParameters ); + System.arraycopy( parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters ); + System.arraycopy( parameterValues, 0, je.parameterValues, 0, nrParameters ); + } + return je; + } + + public void setFileName( String n ) { + filename = n; + } + + /** + * @return the filename + * @deprecated use getFilename() instead + */ + @Deprecated + public String getFileName() { + return filename; + } + + public String getFilename() { + return filename; + } + + public String getRealFilename() { + return environmentSubstitute( getFilename() ); + } + + public void setTransname( String transname ) { + this.transname = transname; + } + + public String getTransname() { + return transname; + } + + public String getDirectory() { + return directory; + } + + public void setDirectory( String directory ) { + this.directory = directory; + } + + public String getLogFilename() { + String retval = ""; + if ( setLogfile ) { + retval += logfile == null ? "" : logfile; + Calendar cal = Calendar.getInstance(); + if ( addDate ) { + SimpleDateFormat sdf = new SimpleDateFormat( "yyyyMMdd" ); + retval += "_" + sdf.format( cal.getTime() ); + } + if ( addTime ) { + SimpleDateFormat sdf = new SimpleDateFormat( "HHmmss" ); + retval += "_" + sdf.format( cal.getTime() ); + } + if ( logext != null && logext.length() > 0 ) { + retval += "." + logext; + } + } + return retval; + } + + public String getXML() { + StringBuffer retval = new StringBuffer( 300 ); + + retval.append( super.getXML() ); + + // specificationMethod + // + retval.append( " " ).append( + XMLHandler.addTagValue( "specification_method", specificationMethod == null ? 
null : specificationMethod + .getCode() ) + ); + retval.append( " " ).append( + XMLHandler.addTagValue( "trans_object_id", transObjectId == null ? null : transObjectId.toString() ) ); + // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same + // repository. + // + if ( rep != null && transObjectId != null ) { + try { + RepositoryObject objectInformation = + rep.getObjectInformation( transObjectId, RepositoryObjectType.TRANSFORMATION ); + if ( objectInformation != null ) { + transname = objectInformation.getName(); + directory = objectInformation.getRepositoryDirectory().getPath(); + } + } catch ( KettleException e ) { + // Ignore object reference problems. It simply means that the reference is no longer valid. + } + } + retval.append( " " ).append( XMLHandler.addTagValue( "filename", filename ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "transname", transname ) ); + + if ( directory != null ) { + retval.append( " " ).append( XMLHandler.addTagValue( "directory", directory ) ); + } else if ( directoryPath != null ) { + // don't loose this info (backup/recovery) + // + retval.append( " " ).append( XMLHandler.addTagValue( "directory", directoryPath ) ); + } + retval.append( " " ).append( XMLHandler.addTagValue( "arg_from_previous", argFromPrevious ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "params_from_previous", paramsFromPrevious ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "exec_per_row", execPerRow ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "clear_rows", clearResultRows ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "clear_files", clearResultFiles ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "set_logfile", setLogfile ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "logfile", logfile ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "logext", logext ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "add_date", addDate ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "add_time", addTime ) ); + retval.append( " " ).append( + XMLHandler.addTagValue( "loglevel", logFileLevel != null ? logFileLevel.getCode() : null ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "cluster", clustering ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "slave_server_name", remoteSlaveServerName ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "set_append_logfile", setAppendLogfile ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "wait_until_finished", waitingToFinish ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "follow_abort_remote", followingAbortRemotely ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "create_parent_folder", createParentFolder ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "logging_remote_work", loggingRemoteWork ) ); + + if ( arguments != null ) { + for ( int i = 0; i < arguments.length; i++ ) { + // This is a very very bad way of making an XML file, don't use it (or + // copy it). Sven Boden + retval.append( " " ).append( XMLHandler.addTagValue( "argument" + i, arguments[ i ] ) ); + } + } + + if ( parameters != null ) { + retval.append( " " ).append( XMLHandler.openTag( "parameters" ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "pass_all_parameters", passingAllParameters ) ); + + for ( int i = 0; i < parameters.length; i++ ) { + // This is a better way of making the XML file than the arguments. 
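+          // Illustrative sketch of the XML fragment this loop produces (tag names are taken from the
+          // surrounding code; the values are made up):
+          //   <parameters>
+          //     <pass_all_parameters>Y</pass_all_parameters>
+          //     <parameter>
+          //       <name>TARGET_DB</name>
+          //       <stream_name/>
+          //       <value>${TARGET_DB}</value>
+          //     </parameter>
+          //   </parameters>
+          // loadXML() below reads the same tags back via XMLHandler.getSubNodeByNr()/getTagValue().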
+ retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[ i ] ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "stream_name", parameterFieldNames[ i ] ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "value", parameterValues[ i ] ) ); + + retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ); + } + retval.append( " " ).append( XMLHandler.closeTag( "parameters" ) ); + } + + return retval.toString(); + } + + private void checkObjectLocationSpecificationMethod() { + if ( specificationMethod == null ) { + // Backward compatibility + // + // Default = Filename + // + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + + if ( !Const.isEmpty( filename ) ) { + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + } else if ( transObjectId != null ) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } else if ( !Const.isEmpty( transname ) ) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } + } + } + + public void loadXML( Node entrynode, List databases, List slaveServers, + Repository rep, IMetaStore metaStore ) throws KettleXMLException { + try { + super.loadXML( entrynode, databases, slaveServers ); + + String method = XMLHandler.getTagValue( entrynode, "specification_method" ); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); + + String transId = XMLHandler.getTagValue( entrynode, "trans_object_id" ); + transObjectId = Const.isEmpty( transId ) ? null : new StringObjectId( transId ); + filename = XMLHandler.getTagValue( entrynode, "filename" ); + transname = XMLHandler.getTagValue( entrynode, "transname" ); + directory = XMLHandler.getTagValue( entrynode, "directory" ); + + if ( rep != null && rep.isConnected() && !Const.isEmpty( transname ) ) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } + + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "arg_from_previous" ) ); + paramsFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "params_from_previous" ) ); + execPerRow = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "exec_per_row" ) ); + clearResultRows = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "clear_rows" ) ); + clearResultFiles = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "clear_files" ) ); + setLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_logfile" ) ); + addDate = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_date" ) ); + addTime = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_time" ) ); + logfile = XMLHandler.getTagValue( entrynode, "logfile" ); + logext = XMLHandler.getTagValue( entrynode, "logext" ); + logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) ); + clustering = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "cluster" ) ); + createParentFolder = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "create_parent_folder" ) ); + loggingRemoteWork = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "logging_remote_work" ) ); + + remoteSlaveServerName = XMLHandler.getTagValue( entrynode, "slave_server_name" ); + + setAppendLogfile = "Y".equalsIgnoreCase( 
XMLHandler.getTagValue( entrynode, "set_append_logfile" ) ); + String wait = XMLHandler.getTagValue( entrynode, "wait_until_finished" ); + if ( Const.isEmpty( wait ) ) { + waitingToFinish = true; + } else { + waitingToFinish = "Y".equalsIgnoreCase( wait ); + } + + followingAbortRemotely = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "follow_abort_remote" ) ); + + // How many arguments? + int argnr = 0; + while ( XMLHandler.getTagValue( entrynode, "argument" + argnr ) != null ) { + argnr++; + } + allocateArgs( argnr ); + + // Read them all... + for ( int a = 0; a < argnr; a++ ) { + arguments[ a ] = XMLHandler.getTagValue( entrynode, "argument" + a ); + } + + Node parametersNode = XMLHandler.getSubNode( entrynode, "parameters" ); + + String passAll = XMLHandler.getTagValue( parametersNode, "pass_all_parameters" ); + passingAllParameters = Const.isEmpty( passAll ) || "Y".equalsIgnoreCase( passAll ); + + int nrParameters = XMLHandler.countNodes( parametersNode, "parameter" ); + allocateParams( nrParameters ); + + for ( int i = 0; i < nrParameters; i++ ) { + Node knode = XMLHandler.getSubNodeByNr( parametersNode, "parameter", i ); + + parameters[ i ] = XMLHandler.getTagValue( knode, "name" ); + parameterFieldNames[ i ] = XMLHandler.getTagValue( knode, "stream_name" ); + parameterValues[ i ] = XMLHandler.getTagValue( knode, "value" ); + } + } catch ( KettleException e ) { + throw new KettleXMLException( "Unable to load job entry of type 'trans' from XML node", e ); + } + } + + // Load the jobentry from repository + // + public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List databases, + List slaveServers ) throws KettleException { + try { + String method = rep.getJobEntryAttributeString( id_jobentry, "specification_method" ); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); + String transId = rep.getJobEntryAttributeString( id_jobentry, "trans_object_id" ); + transObjectId = Const.isEmpty( transId ) ? 
null : new StringObjectId( transId ); + transname = rep.getJobEntryAttributeString( id_jobentry, "name" ); + directory = rep.getJobEntryAttributeString( id_jobentry, "dir_path" ); + filename = rep.getJobEntryAttributeString( id_jobentry, "file_name" ); + + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" ); + paramsFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "params_from_previous" ); + execPerRow = rep.getJobEntryAttributeBoolean( id_jobentry, "exec_per_row" ); + clearResultRows = rep.getJobEntryAttributeBoolean( id_jobentry, "clear_rows", true ); + clearResultFiles = rep.getJobEntryAttributeBoolean( id_jobentry, "clear_files", true ); + setLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_logfile" ); + addDate = rep.getJobEntryAttributeBoolean( id_jobentry, "add_date" ); + addTime = rep.getJobEntryAttributeBoolean( id_jobentry, "add_time" ); + logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" ); + logext = rep.getJobEntryAttributeString( id_jobentry, "logext" ); + logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) ); + clustering = rep.getJobEntryAttributeBoolean( id_jobentry, "cluster" ); + createParentFolder = rep.getJobEntryAttributeBoolean( id_jobentry, "create_parent_folder" ); + + remoteSlaveServerName = rep.getJobEntryAttributeString( id_jobentry, "slave_server_name" ); + setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" ); + waitingToFinish = rep.getJobEntryAttributeBoolean( id_jobentry, "wait_until_finished", true ); + followingAbortRemotely = rep.getJobEntryAttributeBoolean( id_jobentry, "follow_abort_remote" ); + loggingRemoteWork = rep.getJobEntryAttributeBoolean( id_jobentry, "logging_remote_work" ); + + // How many arguments? + int argnr = rep.countNrJobEntryAttributes( id_jobentry, "argument" ); + allocateArgs( argnr ); + + // Read all arguments... + for ( int a = 0; a < argnr; a++ ) { + arguments[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "argument" ); + } + + // How many arguments? + int parameternr = rep.countNrJobEntryAttributes( id_jobentry, "parameter_name" ); + allocateParams( parameternr ); + + // Read all parameters ... + for ( int a = 0; a < parameternr; a++ ) { + parameters[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_name" ); + parameterFieldNames[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_stream_name" ); + parameterValues[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_value" ); + } + + passingAllParameters = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_all_parameters", true ); + + } catch ( KettleDatabaseException dbe ) { + throw new KettleException( "Unable to load job entry of type 'trans' from the repository for id_jobentry=" + + id_jobentry, dbe ); + } + } + + // Save the attributes of this job entry + // + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException { + try { + rep.saveJobEntryAttribute( id_job, getObjectId(), "specification_method", specificationMethod == null + ? null : specificationMethod.getCode() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "trans_object_id", transObjectId == null + ? 
null : transObjectId.toString() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "name", getTransname() ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "dir_path", getDirectory() != null ? getDirectory() : "" ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "file_name", filename ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "arg_from_previous", argFromPrevious ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "params_from_previous", paramsFromPrevious ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "exec_per_row", execPerRow ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "clear_rows", clearResultRows ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "clear_files", clearResultFiles ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "set_logfile", setLogfile ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "add_date", addDate ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "add_time", addTime ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "logfile", logfile ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "logext", logext ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "loglevel", logFileLevel != null + ? logFileLevel.getCode() : null ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "cluster", clustering ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "slave_server_name", remoteSlaveServerName ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "set_append_logfile", setAppendLogfile ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "wait_until_finished", waitingToFinish ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "create_parent_folder", createParentFolder ); + rep.saveJobEntryAttribute( id_job, getObjectId(), "logging_remote_work", loggingRemoteWork ); + + // Save the arguments... + if ( arguments != null ) { + for ( int i = 0; i < arguments.length; i++ ) { + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "argument", arguments[ i ] ); + } + } + + // Save the parameters... + if ( parameters != null ) { + for ( int i = 0; i < parameters.length; i++ ) { + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_name", parameters[ i ] ); + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_stream_name", Const.NVL( + parameterFieldNames[ i ], "" ) ); + rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_value", Const.NVL( + parameterValues[ i ], "" ) ); + } + } + + rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_all_parameters", passingAllParameters ); + + } catch ( KettleDatabaseException dbe ) { + throw new KettleException( + "Unable to save job entry of type 'trans' to the repository for id_job=" + id_job, dbe ); + } + } + + public void clear() { + super.clear(); + + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + transname = null; + filename = null; + directory = null; + arguments = null; + argFromPrevious = false; + execPerRow = false; + addDate = false; + addTime = false; + logfile = null; + logext = null; + setLogfile = false; + clearResultRows = false; + clearResultFiles = false; + remoteSlaveServerName = null; + setAppendLogfile = false; + waitingToFinish = true; + followingAbortRemotely = false; // backward compatibility reasons + createParentFolder = false; + logFileLevel = LogLevel.BASIC; + } + + /** + * Execute this job entry and return the result. 
In this case it means, just set the result boolean in the Result + * class. + * + * @param result The result of the previous execution + * @param nr the job entry number + * @return The Result of the execution. + */ + public Result execute( Result result, int nr ) throws KettleException { + result.setEntryNr( nr ); + + LogChannelFileWriter logChannelFileWriter = null; + + LogLevel transLogLevel = parentJob.getLogLevel(); + + String realLogFilename = ""; + if ( setLogfile ) { + transLogLevel = logFileLevel; + + realLogFilename = environmentSubstitute( getLogFilename() ); + + // We need to check here the log filename + // if we do not have one, we must fail + if ( Const.isEmpty( realLogFilename ) ) { + logError( BaseMessages.getString( PKG, "JobTrans.Exception.LogFilenameMissing" ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + // create parent folder? + if ( !FileUtil.createParentFolder( PKG, realLogFilename, createParentFolder, this.getLogChannel(), this ) ) { + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + try { + logChannelFileWriter = + new LogChannelFileWriter( + this.getLogChannelId(), KettleVFS.getFileObject( realLogFilename ), setAppendLogfile ); + logChannelFileWriter.startLogging(); + } catch ( KettleException e ) { + logError( BaseMessages.getString( PKG, "JobTrans.Error.UnableOpenAppender", realLogFilename, e.toString() ) ); + + logError( Const.getStackTracker( e ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + } + + // Figure out the remote slave server... + // + SlaveServer remoteSlaveServer = null; + if ( !Const.isEmpty( remoteSlaveServerName ) ) { + String realRemoteSlaveServerName = environmentSubstitute( remoteSlaveServerName ); + remoteSlaveServer = parentJob.getJobMeta().findSlaveServer( realRemoteSlaveServerName ); + if ( remoteSlaveServer == null ) { + throw new KettleException( BaseMessages.getString( + PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName ) ); + } + } + + // Open the transformation... + // + switch ( specificationMethod ) { + case FILENAME: + if ( isDetailed() ) { + logDetailed( BaseMessages.getString( + PKG, "JobTrans.Log.OpeningTrans", environmentSubstitute( getFilename() ) ) ); + } + break; + case REPOSITORY_BY_NAME: + if ( isDetailed() ) { + logDetailed( BaseMessages.getString( + PKG, "JobTrans.Log.OpeningTransInDirec", environmentSubstitute( getFilename() ), + environmentSubstitute( directory ) ) ); + } + break; + case REPOSITORY_BY_REFERENCE: + if ( isDetailed() ) { + logDetailed( BaseMessages.getString( PKG, "JobTrans.Log.OpeningTransByReference", transObjectId ) ); + } + break; + default: + break; + } + + // Load the transformation only once for the complete loop! + // Throws an exception if it was not possible to load the transformation. For example, the XML file doesn't exist or + // the repository is down. + // Log the stack trace and return an error condition from this + // + TransMeta transMeta = null; + try { + transMeta = getTransMeta( rep, metaStore, this ); + } catch ( KettleException e ) { + logError( Const.getStackTracker( e ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + + int iteration = 0; + String[] args1 = arguments; + if ( args1 == null || args1.length == 0 ) { // No arguments set, look at the parent job. 
+ args1 = parentJob.getArguments(); + } + // initializeVariablesFrom(parentJob); + + // + // For the moment only do variable translation at the start of a job, not + // for every input row (if that would be switched on). This is for safety, + // the real argument setting is later on. + // + String[] args = null; + if ( args1 != null ) { + args = new String[ args1.length ]; + for ( int idx = 0; idx < args1.length; idx++ ) { + args[ idx ] = environmentSubstitute( args1[ idx ] ); + } + } + + RowMetaAndData resultRow = null; + boolean first = true; + List rows = new ArrayList( result.getRows() ); + + while ( ( first && !execPerRow ) + || ( execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0 ) + && !parentJob.isStopped() ) { + // Clear the result rows of the result + // Otherwise we double the amount of rows every iteration in the simple cases. + // + if ( execPerRow ) { + result.getRows().clear(); + } + if ( rows != null && execPerRow ) { + resultRow = rows.get( iteration ); + } else { + resultRow = null; + } + + NamedParams namedParam = new NamedParamsDefault(); + if ( parameters != null ) { + for ( int idx = 0; idx < parameters.length; idx++ ) { + if ( !Const.isEmpty( parameters[ idx ] ) ) { + // We have a parameter + // + namedParam.addParameterDefinition( parameters[ idx ], "", "Job entry runtime" ); + if ( Const.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { + // There is no field name specified. + // + String value = Const.NVL( environmentSubstitute( parameterValues[ idx ] ), "" ); + namedParam.setParameterValue( parameters[ idx ], value ); + } else { + // something filled in, in the field column... + // + String value = ""; + if ( resultRow != null ) { + value = resultRow.getString( parameterFieldNames[ idx ], "" ); + } + namedParam.setParameterValue( parameters[ idx ], value ); + } + } + } + } + + first = false; + + Result previousResult = result; + + try { + if ( isDetailed() ) { + logDetailed( BaseMessages.getString( + PKG, "JobTrans.StartingTrans", getFilename(), getName(), getDescription() ) ); + } + + if ( clearResultRows ) { + previousResult.setRows( new ArrayList() ); + } + + if ( clearResultFiles ) { + previousResult.getResultFiles().clear(); + } + + /* + * Set one or more "result" rows on the transformation... + */ + if ( execPerRow ) { + // Execute for each input row + + if ( argFromPrevious ) { + // Copy the input row to the (command line) arguments + + args = null; + if ( resultRow != null ) { + args = new String[ resultRow.size() ]; + for ( int i = 0; i < resultRow.size(); i++ ) { + args[ i ] = resultRow.getString( i, null ); + } + } + } else { + // Just pass a single row + List newList = new ArrayList(); + newList.add( resultRow ); + + // This previous result rows list can be either empty or not. + // Depending on the checkbox "clear result rows" + // In this case, it would execute the transformation with one extra row each time + // Can't figure out a real use-case for it, but hey, who am I to decide that, right? 
+ // :-) + // + previousResult.getRows().addAll( newList ); + } + + if ( paramsFromPrevious ) { // Copy the input the parameters + + if ( parameters != null ) { + for ( int idx = 0; idx < parameters.length; idx++ ) { + if ( !Const.isEmpty( parameters[ idx ] ) ) { + // We have a parameter + if ( Const.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { + namedParam.setParameterValue( parameters[ idx ], Const.NVL( + environmentSubstitute( parameterValues[ idx ] ), "" ) ); + } else { + String fieldValue = ""; + + if ( resultRow != null ) { + fieldValue = resultRow.getString( parameterFieldNames[ idx ], "" ); + } + // Get the value from the input stream + namedParam.setParameterValue( parameters[ idx ], Const.NVL( fieldValue, "" ) ); + } + } + } + } + } + } else { + if ( argFromPrevious ) { + // Only put the first Row on the arguments + args = null; + if ( resultRow != null ) { + args = new String[ resultRow.size() ]; + for ( int i = 0; i < resultRow.size(); i++ ) { + args[ i ] = resultRow.getString( i, null ); + } + } + } + + if ( paramsFromPrevious ) { + // Copy the input the parameters + if ( parameters != null ) { + for ( int idx = 0; idx < parameters.length; idx++ ) { + if ( !Const.isEmpty( parameters[ idx ] ) ) { + // We have a parameter + if ( Const.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { + namedParam.setParameterValue( parameters[ idx ], Const.NVL( + environmentSubstitute( parameterValues[ idx ] ), "" ) ); + } else { + String fieldValue = ""; + + if ( resultRow != null ) { + fieldValue = resultRow.getString( parameterFieldNames[ idx ], "" ); + } + // Get the value from the input stream + namedParam.setParameterValue( parameters[ idx ], Const.NVL( fieldValue, "" ) ); + } + } + } + } + } + } + + // Handle the parameters... + // + transMeta.clearParameters(); + String[] parameterNames = transMeta.listParameters(); + for ( int idx = 0; idx < parameterNames.length; idx++ ) { + // Grab the parameter value set in the Trans job entry + // + String thisValue = namedParam.getParameterValue( parameterNames[ idx ] ); + if ( !Const.isEmpty( thisValue ) ) { + // Set the value as specified by the user in the job entry + // + transMeta.setParameterValue( parameterNames[ idx ], thisValue ); + } else { + // See if the parameter had a value set in the parent job... + // This value should pass down to the transformation if that's what we opted to do. + // + if ( isPassingAllParameters() ) { + String parentValue = parentJob.getParameterValue( parameterNames[ idx ] ); + if ( !Const.isEmpty( parentValue ) ) { + transMeta.setParameterValue( parameterNames[ idx ], parentValue ); + } + } + } + } + + // Execute this transformation across a cluster of servers + // + if ( clustering ) { + TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration(); + executionConfiguration.setClusterPosting( true ); + executionConfiguration.setClusterPreparing( true ); + executionConfiguration.setClusterStarting( true ); + executionConfiguration.setClusterShowingTransformation( false ); + executionConfiguration.setSafeModeEnabled( false ); + executionConfiguration.setRepository( rep ); + executionConfiguration.setLogLevel( transLogLevel ); + executionConfiguration.setPreviousResult( previousResult ); + + // Also pass the variables from the transformation into the execution configuration + // That way it can go over the HTTP connection to the slave server. + // + executionConfiguration.setVariables( transMeta ); + + // Also set the arguments... 
+ // + executionConfiguration.setArgumentStrings( args ); + + if ( parentJob.getJobMeta().isBatchIdPassed() ) { + executionConfiguration.setPassedBatchId( parentJob.getPassedBatchId() ); + } + + TransSplitter transSplitter = null; + long errors = 0; + try { + transSplitter = Trans.executeClustered( transMeta, executionConfiguration ); + + // Monitor the running transformations, wait until they are done. + // Also kill them all if anything goes bad + // Also clean up afterwards... + // + errors += Trans.monitorClusteredTransformation( log, transSplitter, parentJob ); + + } catch ( Exception e ) { + logError( "Error during clustered execution. Cleaning up clustered execution.", e ); + // In case something goes wrong, make sure to clean up afterwards! + // + errors++; + if ( transSplitter != null ) { + Trans.cleanupCluster( log, transSplitter ); + } else { + // Try to clean anyway... + // + SlaveServer master = null; + for ( StepMeta stepMeta : transMeta.getSteps() ) { + if ( stepMeta.isClustered() ) { + for ( SlaveServer slaveServer : stepMeta.getClusterSchema().getSlaveServers() ) { + if ( slaveServer.isMaster() ) { + master = slaveServer; + break; + } + } + } + } + if ( master != null ) { + master.deAllocateServerSockets( transMeta.getName(), null ); + } + } + } + + result.clear(); + + if ( transSplitter != null ) { + Result clusterResult = + Trans.getClusteredTransformationResult( log, transSplitter, parentJob, loggingRemoteWork ); + result.add( clusterResult ); + } + + result.setNrErrors( result.getNrErrors() + errors ); + + } else if ( remoteSlaveServer != null ) { + // Execute this transformation remotely + // + + // Make sure we can parameterize the slave server connection + // + remoteSlaveServer.shareVariablesWith( this ); + + // Remote execution... + // + TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration(); + transExecutionConfiguration.setPreviousResult( previousResult.clone() ); + transExecutionConfiguration.setArgumentStrings( args ); + transExecutionConfiguration.setVariables( this ); + transExecutionConfiguration.setRemoteServer( remoteSlaveServer ); + transExecutionConfiguration.setLogLevel( transLogLevel ); + transExecutionConfiguration.setRepository( rep ); + transExecutionConfiguration.setLogFileName( realLogFilename ); + transExecutionConfiguration.setSetAppendLogfile( setAppendLogfile ); + transExecutionConfiguration.setSetLogfile( setLogfile ); + + Map params = transExecutionConfiguration.getParams(); + for ( String param : transMeta.listParameters() ) { + String value = + Const.NVL( transMeta.getParameterValue( param ), Const.NVL( + transMeta.getParameterDefault( param ), transMeta.getVariable( param ) ) ); + params.put( param, value ); + } + + if ( parentJob.getJobMeta().isBatchIdPassed() ) { + transExecutionConfiguration.setPassedBatchId( parentJob.getPassedBatchId() ); + } + + // Send the XML over to the slave server + // Also start the transformation over there... + // + String carteObjectId = Trans.sendToSlaveServer( transMeta, transExecutionConfiguration, rep, metaStore ); + + // Now start the monitoring... + // + SlaveServerTransStatus transStatus = null; + while ( !parentJob.isStopped() && waitingToFinish ) { + try { + transStatus = remoteSlaveServer.getTransStatus( transMeta.getName(), carteObjectId, 0 ); + if ( !transStatus.isRunning() ) { + // The transformation is finished, get the result... 
+ // + Result remoteResult = transStatus.getResult(); + result.clear(); + result.add( remoteResult ); + + // In case you manually stop the remote trans (browser etc), make sure it's marked as an error + // + if ( remoteResult.isStopped() ) { + result.setNrErrors( result.getNrErrors() + 1 ); // + } + + // Make sure to clean up : write a log record etc, close any left-over sockets etc. + // + remoteSlaveServer.cleanupTransformation( transMeta.getName(), carteObjectId ); + + break; + } + } catch ( Exception e1 ) { + + logError( BaseMessages.getString( PKG, "JobTrans.Error.UnableContactSlaveServer", "" + + remoteSlaveServer, transMeta.getName() ), e1 ); + result.setNrErrors( result.getNrErrors() + 1L ); + break; // Stop looking too, chances are too low the server will come back on-line + } + + // sleep for 2 seconds + try { + Thread.sleep( 2000 ); + } catch ( InterruptedException e ) { + // Ignore + } + } + + if ( parentJob.isStopped() ) { + // See if we have a status and if we need to stop the remote execution here... + // + if ( transStatus == null || transStatus.isRunning() ) { + // Try a remote abort ... + // + remoteSlaveServer.stopTransformation( transMeta.getName(), transStatus.getId() ); + + // And a cleanup... + // + remoteSlaveServer.cleanupTransformation( transMeta.getName(), transStatus.getId() ); + + // Set an error state! + // + result.setNrErrors( result.getNrErrors() + 1L ); + } + } + + } else { + + // Execute this transformation on the local machine + // + + // Create the transformation from meta-data + // + trans = new Trans( transMeta, this ); + + // Pass the socket repository as early as possible... + // + trans.setSocketRepository( parentJob.getSocketRepository() ); + + if ( parentJob.getJobMeta().isBatchIdPassed() ) { + trans.setPassedBatchId( parentJob.getPassedBatchId() ); + } + + // set the parent job on the transformation, variables are taken from here... + // + trans.setParentJob( parentJob ); + trans.setParentVariableSpace( parentJob ); + trans.setLogLevel( transLogLevel ); + trans.setPreviousResult( previousResult ); + trans.setArguments( arguments ); + + // Mappings need the repository to load from + // + trans.setRepository( rep ); + + // inject the metaStore + trans.setMetaStore( metaStore ); + + // First get the root job + // + Job rootJob = parentJob; + while ( rootJob.getParentJob() != null ) { + rootJob = rootJob.getParentJob(); + } + + // Get the start and end-date from the root job... + // + trans.setJobStartDate( rootJob.getStartDate() ); + trans.setJobEndDate( rootJob.getEndDate() ); + + // Inform the parent job we started something here... + // + for ( DelegationListener delegationListener : parentJob.getDelegationListeners() ) { + // TODO: copy some settings in the job execution configuration, not strictly needed + // but the execution configuration information is useful in case of a job re-start + // + delegationListener.transformationDelegationStarted( trans, new TransExecutionConfiguration() ); + } + + try { + // Start execution... + // + trans.execute( args ); + + // Wait until we're done with it... + //TODO is it possible to implement Observer pattern to avoid Thread.sleep here? 
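+            // One possible answer to the TODO above, sketched as an untested comment (TransAdapter and
+            // addTransListener() are existing Kettle APIs, but this wiring is not part of the original code):
+            //   final CountDownLatch done = new CountDownLatch( 1 );
+            //   trans.addTransListener( new TransAdapter() {
+            //     public void transFinished( Trans t ) { done.countDown(); }
+            //   } );
+            //   done.await();
+            // A periodic wake-up (or a stop listener on the parent job) would still be needed to honour
+            // parentJob.isStopped().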
+ while ( !trans.isFinished() && trans.getErrors() == 0 ) { + if ( parentJob.isStopped() ) { + trans.stopAll(); + break; + } else { + try { + Thread.sleep( 0, 500 ); + } catch ( InterruptedException e ) { + // Ignore errors + } + } + } + trans.waitUntilFinished(); + + if ( parentJob.isStopped() || trans.getErrors() != 0 ) { + trans.stopAll(); + result.setNrErrors( 1 ); + } + Result newResult = trans.getResult(); + + result.clear(); // clear only the numbers, NOT the files or rows. + result.add( newResult ); + + // Set the result rows too, if any ... + if ( !Const.isEmpty( newResult.getRows() ) ) { + result.setRows( newResult.getRows() ); + } + + if ( setLogfile ) { + ResultFile resultFile = + new ResultFile( + ResultFile.FILE_TYPE_LOG, KettleVFS.getFileObject( realLogFilename, this ), parentJob + .getJobname(), toString() + ); + result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); + } + } catch ( KettleException e ) { + + logError( BaseMessages.getString( PKG, "JobTrans.Error.UnablePrepareExec" ), e ); + result.setNrErrors( 1 ); + } + } + } catch ( Exception e ) { + + logError( BaseMessages.getString( PKG, "JobTrans.ErrorUnableOpenTrans", e.getMessage() ) ); + logError( Const.getStackTracker( e ) ); + result.setNrErrors( 1 ); + } + iteration++; + } + + if ( setLogfile ) { + if ( logChannelFileWriter != null ) { + logChannelFileWriter.stopLogging(); + + ResultFile resultFile = + new ResultFile( + ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName() ); + result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); + + // See if anything went wrong during file writing... + // + if ( logChannelFileWriter.getException() != null ) { + logError( "Unable to open log file [" + getLogFilename() + "] : " ); + logError( Const.getStackTracker( logChannelFileWriter.getException() ) ); + result.setNrErrors( 1 ); + result.setResult( false ); + return result; + } + } + } + + if ( result.getNrErrors() == 0 ) { + result.setResult( true ); + } else { + result.setResult( false ); + } + + return result; + } + + @Deprecated + public TransMeta getTransMeta( Repository rep, VariableSpace space ) throws KettleException { + return getTransMeta( rep, null, space ); + } + + public TransMeta getTransMeta( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { + try { + TransMeta transMeta = null; + CurrentDirectoryResolver r = new CurrentDirectoryResolver(); + VariableSpace tmpSpace = r.resolveCurrentDirectory( + specificationMethod, space, rep, parentJob, getFilename() ); + switch ( specificationMethod ) { + case FILENAME: + String realFilename = tmpSpace.environmentSubstitute( getFilename() ); + if ( rep != null ) { + realFilename = r.normalizeSlashes( realFilename ); + // need to try to load from the repository + try { + String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); + String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1 ); + RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); + transMeta = rep.loadTransformation( tmpFilename, dir, null, true, null ); + } catch ( KettleException ke ) { + // try without extension + if ( realFilename.endsWith( Const.STRING_TRANS_DEFAULT_EXT ) ) { + try { + String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1, + realFilename.indexOf( "." 
+ Const.STRING_TRANS_DEFAULT_EXT ) ); + String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); + RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); + transMeta = rep.loadTransformation( tmpFilename, dir, null, true, null ); + } catch ( KettleException ke2 ) { + // fall back to try loading from file system (transMeta is going to be null) + } + } + } + } + if ( transMeta == null ) { + logBasic( "Loading transformation from XML file [" + realFilename + "]" ); + transMeta = new TransMeta( realFilename, metaStore, null, true, this, null ); + } + break; + case REPOSITORY_BY_NAME: + String transname = tmpSpace.environmentSubstitute( getTransname() ); + String realDirectory = tmpSpace.environmentSubstitute( getDirectory() ); + + logBasic( BaseMessages.getString( PKG, "JobTrans.Log.LoadingTransRepDirec", transname, realDirectory ) ); + + if ( rep != null ) { + // + // It only makes sense to try to load from the repository when the + // repository is also filled in. + // + // It reads last the last revision from the repository. + // + realDirectory = r.normalizeSlashes( realDirectory ); + + RepositoryDirectoryInterface repositoryDirectory = rep.findDirectory( realDirectory ); + transMeta = rep.loadTransformation( transname, repositoryDirectory, null, true, null ); + } else { + // rep is null, let's try loading by filename + try { + transMeta = new TransMeta( realDirectory + "/" + transname, metaStore, null, true, this, null ); + } catch ( KettleException ke ) { + try { + // add .ktr extension and try again + transMeta = new TransMeta( realDirectory + "/" + transname + "." + Const.STRING_TRANS_DEFAULT_EXT, + metaStore, null, true, this, null ); + } catch ( KettleException ke2 ) { + throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.NoRepDefined" ), ke2 ); + } + } + } + break; + case REPOSITORY_BY_REFERENCE: + if ( transObjectId == null ) { + throw new KettleException( BaseMessages.getString( PKG, + "JobTrans.Exception.ReferencedTransformationIdIsNull" ) ); + } + + if ( rep != null ) { + // Load the last revision + // + transMeta = rep.loadTransformation( transObjectId, null ); + } + break; + default: + throw new KettleException( "The specified object location specification method '" + + specificationMethod + "' is not yet supported in this job entry." ); + } + + if ( transMeta != null ) { + // copy parent variables to this loaded variable space. + // + transMeta.copyVariablesFrom( this ); + + // Pass repository and metastore references + // + transMeta.setRepository( rep ); + transMeta.setMetaStore( metaStore ); + } + + return transMeta; + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.MetaDataLoad" ), e ); + } + } + + public boolean evaluates() { + return true; + } + + public boolean isUnconditional() { + return true; + } + + public List getSQLStatements( Repository repository, IMetaStore metaStore, VariableSpace space ) throws KettleException { + this.copyVariablesFrom( space ); + TransMeta transMeta = getTransMeta( repository, metaStore, this ); + + return transMeta.getSQLStatements(); + } + + /** + * @return Returns the directoryPath. + */ + public String getDirectoryPath() { + return directoryPath; + } + + /** + * @param directoryPath The directoryPath to set. 
+ */ + public void setDirectoryPath( String directoryPath ) { + this.directoryPath = directoryPath; + } + + /** + * @return the clustering + */ + public boolean isClustering() { + return clustering; + } + + /** + * @param clustering the clustering to set + */ + public void setClustering( boolean clustering ) { + this.clustering = clustering; + } + + public void check( List remarks, JobMeta jobMeta, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + if ( setLogfile ) { + JobEntryValidatorUtils.andValidator().validate( this, "logfile", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + } + if ( !Const.isEmpty( filename ) ) { + JobEntryValidatorUtils.andValidator().validate( this, "filename", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + } else { + JobEntryValidatorUtils.andValidator().validate( this, "transname", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + JobEntryValidatorUtils.andValidator().validate( this, "directory", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notNullValidator() ) ); + } + } + + public List getResourceDependencies( JobMeta jobMeta ) { + List references = super.getResourceDependencies( jobMeta ); + if ( !Const.isEmpty( filename ) ) { + // During this phase, the variable space hasn't been initialized yet - it seems + // to happen during the execute. As such, we need to use the job meta's resolution + // of the variables. + String realFileName = jobMeta.environmentSubstitute( filename ); + ResourceReference reference = new ResourceReference( this ); + reference.getEntries().add( new ResourceEntry( realFileName, ResourceType.ACTIONFILE ) ); + references.add( reference ); + } + return references; + } + + /** + * We're going to load the transformation meta data referenced here. Then we're going to give it a new filename, + * modify that filename in this entries. The parent caller will have made a copy of it, so it should be OK to do so. + *

+ * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied + * resource naming interface allows the object to name appropriately without worrying about those parts of the + * implementation specific details. + * + * @param space The variable space to resolve (environment) variables with. + * @param definitions The map containing the filenames and content + * @param namingInterface The resource naming interface allows the object to be named appropriately + * @param repository The repository to load resources from + * @param metaStore the metaStore to load external metadata from + * @return The filename for this object. (also contained in the definitions map) + * @throws KettleException in case something goes wrong during the export + */ + public String exportResources( VariableSpace space, Map definitions, + ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { + // Try to load the transformation from repository or file. + // Modify this recursively too... + // + // AGAIN: there is no need to clone this job entry because the caller is responsible for this. + // + // First load the transformation metadata... + // + copyVariablesFrom( space ); + TransMeta transMeta = getTransMeta( repository, space ); + + // Also go down into the transformation and export the files there. (mapping recursively down) + // + String proposedNewFilename = + transMeta.exportResources( transMeta, definitions, namingInterface, repository, metaStore ); + + // To get a relative path to it, we inject ${Internal.Job.Filename.Directory} + // + String newFilename = "${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}/" + proposedNewFilename; + + // Set the correct filename inside the XML. + // + transMeta.setFilename( newFilename ); + + // exports always reside in the root directory, in case we want to turn this into a file repository... 
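+    // For example (illustrative name, not from the source): a transformation exported as "trans1.ktr" is
+    // referenced from the job entry as "${Internal.Job.Filename.Directory}/trans1.ktr", so it resolves
+    // next to the exported job file wherever the ZIP is unpacked or executed.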
+ // + transMeta.setRepositoryDirectory( new RepositoryDirectory() ); + + // export to filename ALWAYS (this allows the exported XML to be executed remotely) + // + setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); + + // change it in the job entry + // + filename = newFilename; + + return proposedNewFilename; + } + + protected String getLogfile() { + return logfile; + } + + /** + * @return the remote slave server name + */ + public String getRemoteSlaveServerName() { + return remoteSlaveServerName; + } + + /** + * @param remoteSlaveServerName the remote slave server name to set + */ + public void setRemoteSlaveServerName( String remoteSlaveServerName ) { + this.remoteSlaveServerName = remoteSlaveServerName; + } + + /** + * @return the waitingToFinish + */ + public boolean isWaitingToFinish() { + return waitingToFinish; + } + + /** + * @param waitingToFinish the waitingToFinish to set + */ + public void setWaitingToFinish( boolean waitingToFinish ) { + this.waitingToFinish = waitingToFinish; + } + + /** + * @return the followingAbortRemotely + */ + public boolean isFollowingAbortRemotely() { + return followingAbortRemotely; + } + + /** + * @param followingAbortRemotely the followingAbortRemotely to set + */ + public void setFollowingAbortRemotely( boolean followingAbortRemotely ) { + this.followingAbortRemotely = followingAbortRemotely; + } + + public boolean isLoggingRemoteWork() { + return loggingRemoteWork; + } + + public void setLoggingRemoteWork( boolean loggingRemoteWork ) { + this.loggingRemoteWork = loggingRemoteWork; + } + + /** + * @return the passingAllParameters + */ + public boolean isPassingAllParameters() { + return passingAllParameters; + } + + /** + * @param passingAllParameters the passingAllParameters to set + */ + public void setPassingAllParameters( boolean passingAllParameters ) { + this.passingAllParameters = passingAllParameters; + } + + public Trans getTrans() { + return trans; + } + + /** + * @return the transObjectId + */ + public ObjectId getTransObjectId() { + return transObjectId; + } + + /** + * @param transObjectId the transObjectId to set + */ + public void setTransObjectId( ObjectId transObjectId ) { + this.transObjectId = transObjectId; + } + + /** + * @return the specificationMethod + */ + public ObjectLocationSpecificationMethod getSpecificationMethod() { + return specificationMethod; + } + + /** + * @param specificationMethod the specificationMethod to set + */ + public void setSpecificationMethod( ObjectLocationSpecificationMethod specificationMethod ) { + this.specificationMethod = specificationMethod; + } + + public boolean hasRepositoryReferences() { + return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } + + /** + * Look up the references after import + * + * @param repository the repository to reference. + */ + public void lookupRepositoryReferences( Repository repository ) throws KettleException { + // The correct reference is stored in the trans name and directory attributes... + // + RepositoryDirectoryInterface repositoryDirectoryInterface = + RepositoryImportLocation.getRepositoryImportLocation().findDirectory( directory ); + transObjectId = repository.getTransformationID( transname, repositoryDirectoryInterface ); + } + + /** + * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... 
+ */ + public String[] getReferencedObjectDescriptions() { + return new String[] { BaseMessages.getString( PKG, "JobEntryTrans.ReferencedObject.Description" ), }; + } + + private boolean isTransformationDefined() { + return !Const.isEmpty( filename ) + || transObjectId != null || ( !Const.isEmpty( this.directory ) && !Const.isEmpty( transname ) ); + } + + public boolean[] isReferencedObjectEnabled() { + return new boolean[] { isTransformationDefined(), }; + } + + /** + * Load the referenced object + * + * @param index the referenced object index to load (in case there are multiple references) + * @param rep the repository + * @param metaStore metaStore + * @param space the variable space to use + * @return the referenced object once loaded + * @throws KettleException + */ + public Object loadReferencedObject( int index, Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { + return getTransMeta( rep, metaStore, space ); + } + +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java b/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java new file mode 100644 index 0000000..5f68579 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java @@ -0,0 +1,348 @@ +/*! + * Copyright 2016 Pentaho Corporation. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.pentaho.di.repository.pur; + +import org.apache.commons.lang.StringUtils; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.RepositoryDirectory; +import org.pentaho.di.repository.RepositoryDirectoryInterface; +import org.pentaho.di.repository.RepositoryElementMetaInterface; +import org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.repository.StringObjectId; +import org.pentaho.di.repository.pur.model.EERepositoryObject; +import org.pentaho.di.repository.pur.model.RepositoryLock; +import org.pentaho.di.ui.repository.pur.services.ILockService; +import org.pentaho.platform.api.repository2.unified.IUnifiedRepository; +import org.pentaho.platform.api.repository2.unified.RepositoryFile; +import org.pentaho.platform.api.repository2.unified.RepositoryFileTree; +import org.pentaho.platform.api.repository2.unified.RepositoryRequest; +import org.pentaho.platform.repository2.ClientRepositoryPaths; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import static org.pentaho.di.repository.pur.PurRepository.getObjectType; + +/** + * A version of RepositoryDirectoryInterface which only loads from the underlying repository as needed (Lazy) + *

+ * Created by nbaker on 12/22/15. + *

+ * Note the only reason we're extending RepositoryDirectory instead of implementing RepositoryDirectoryInterface is due + * to some interface methods returning RepositoryDirectory!! + */ +public class LazyUnifiedRepositoryDirectory extends RepositoryDirectory { + + private RepositoryFile self; + private IUnifiedRepository repository; + private RepositoryServiceRegistry registry; + private List subdirectories; + private List fileChildren; + private RepositoryDirectoryInterface parent; + private Logger logger = LoggerFactory.getLogger( getClass() ); + + public LazyUnifiedRepositoryDirectory( RepositoryFile self, RepositoryDirectoryInterface parent, + IUnifiedRepository repository, RepositoryServiceRegistry registry ) { + this.self = self; + this.parent = parent; + this.repository = repository; + this.registry = registry; + } + + private String getParentPath( String absolutePath ) { + int parentEndIndex; + if ( absolutePath.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { + parentEndIndex = absolutePath.lastIndexOf( RepositoryDirectory.DIRECTORY_SEPARATOR, absolutePath.length() - 2 ); + } else { + parentEndIndex = absolutePath.lastIndexOf( RepositoryDirectory.DIRECTORY_SEPARATOR ); + } + if ( parentEndIndex < 0 ) { + return null; + } + return absolutePath.substring( 0, parentEndIndex ); + } + + @Override public RepositoryDirectory findDirectory( String path ) { + if ( StringUtils.isEmpty( path ) ) { + return null; + } + String absolutePath; + if ( path.startsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { + if ( self.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { + absolutePath = self.getPath() + path.substring( 1 ); + } else { + absolutePath = self.getPath() + path; + } + } else { + if ( self.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { + absolutePath = self.getPath() + path; + } else { + absolutePath = self.getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + path; + } + } + + RepositoryFile file = repository.getFile( absolutePath ); + if ( file == null || !file.isFolder() ) { + return null; + } + if ( isRoot() && RepositoryDirectory.DIRECTORY_SEPARATOR.equals( absolutePath ) ) { + return this; + } + + // Verifies if this is the parent directory of file and if so passes this as parent argument + String parentPath = getParentPath( absolutePath ); + if ( self.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { + if ( parentPath.equals( self.getPath().substring( 0, self.getPath().length() - 1 ) ) ) { + return new LazyUnifiedRepositoryDirectory( file, this, repository, registry ); + } + } else { + if ( parentPath.equals( self.getPath() ) ) { + return new LazyUnifiedRepositoryDirectory( file, this, repository, registry ); + } + } + + return new LazyUnifiedRepositoryDirectory( file, findDirectory( parentPath ), repository, registry ); + + } + + @Override public RepositoryDirectory findChild( String name ) { + return findDirectory( name ); + } + + @Override public RepositoryDirectory findDirectory( String[] path ) { + return findDirectory( StringUtils.join( path, "/" ) ); + } + + @Override public List getChildren() { + if ( subdirectories == null ) { + subdirectories = new ArrayList<>(); + synchronized ( subdirectories ) { + List children = getAllURChildrenFiles(); + for ( RepositoryFile child : children ) { + LazyUnifiedRepositoryDirectory dir = new LazyUnifiedRepositoryDirectory( child, this, repository, registry ); + dir.setObjectId( new StringObjectId( child.getId().toString() ) ); + this.addSubdirectory( dir ); + } + } + } + 
return subdirectories; + } + + @Override public List getRepositoryObjects() { + if ( fileChildren == null ) { + + fileChildren = new ArrayList(); + synchronized ( fileChildren ) { + + UnifiedRepositoryLockService lockService = + (UnifiedRepositoryLockService) registry.getService( ILockService.class ); + + RepositoryFileTree tree = repository.getTree( new RepositoryRequest( this.self.getPath(), true, 1, null ) ); + + for ( RepositoryFileTree tchild : tree.getChildren() ) { + RepositoryFile child = tchild.getFile(); + + RepositoryLock lock = null; + try { + lock = lockService.getLock( child ); + RepositoryObjectType objectType = getObjectType( child.getName() ); + EERepositoryObject repositoryObject = + new EERepositoryObject( child, this, null, objectType, null, lock, false ); + + repositoryObject.setVersioningEnabled( tchild.getVersioningEnabled() ); + repositoryObject.setVersionCommentEnabled( tchild.getVersionCommentEnabled() ); + fileChildren.add( repositoryObject ); + } catch ( KettleException e ) { + logger.error( "Error converting Unified Repository file to PDI RepositoryObject: " + child.getPath() + + ". File will be skipped", e ); + } + } + } + } + return fileChildren; + + } + + @Override public void setRepositoryObjects( List list ) { + synchronized ( fileChildren ) { + fileChildren.clear(); + fileChildren.addAll( list ); + } + } + + @Override public boolean isVisible() { + return !isRoot() && !self.isHidden(); + } + + + @Override public int getNrSubdirectories() { + List childrenFiles = getAllURChildrenFiles(); + return childrenFiles.size(); + } + + @Override public RepositoryDirectory getSubdirectory( int i ) { + if ( subdirectories == null ) { + getChildren(); + } + + if ( i >= subdirectories.size() || i < 0 ) { + return null; + } + RepositoryDirectoryInterface directoryInterface = subdirectories.get( i ); + // Have to cast due to bad interface + if ( directoryInterface instanceof RepositoryDirectory ) { + return (RepositoryDirectory) directoryInterface; + } + throw new IllegalStateException( + "Bad Repository interface expects RepositoryDirectoryInterface to be an instance of" + + " RepositoryDirectory. This class is not: " + directoryInterface.getClass().getName() ); + } + + private List getAllURChildrenFiles() { + RepositoryRequest repositoryRequest = new RepositoryRequest(); + repositoryRequest.setShowHidden( true ); + repositoryRequest.setTypes( RepositoryRequest.FILES_TYPE_FILTER.FOLDERS ); + repositoryRequest.setPath( this.self.getId().toString() ); + List children = repository.getChildren( repositoryRequest ); + + + // Special case: /etc should not be returned from a directory listing. + RepositoryFile etcFile = null; + if ( this.isRoot() ) { + etcFile = repository.getFile( ClientRepositoryPaths.getEtcFolderPath() ); + } + + // Filter for Folders only doesn't appear to work + Iterator iterator = children.iterator(); + while ( iterator.hasNext() ) { + RepositoryFile next = iterator.next(); + if ( !next.isFolder() ) { + iterator.remove(); + } + + // Special case: /etc should not be returned from a directory listing. 
+ if ( this.isRoot() && next.equals( etcFile ) ) { + iterator.remove(); + } + } + return children; + } + + @Override public void clear() { + if ( this.fileChildren != null ) { + synchronized ( fileChildren ) { + this.fileChildren.clear(); + } + } + if ( this.subdirectories != null ) { + synchronized ( subdirectories ) { + this.subdirectories.clear(); + } + } + } + + @Override public void addSubdirectory( RepositoryDirectoryInterface repositoryDirectoryInterface ) { + if ( subdirectories == null ) { + subdirectories = new ArrayList<>(); + } + synchronized ( subdirectories ) { + this.subdirectories.add( repositoryDirectoryInterface ); + } + } + + @Override public String getName() { + return self.getName(); + } + + @Override public String getPath() { + return self.getPath(); + } + + @Override public ObjectId getObjectId() { + return new StringObjectId( self.getId().toString() ); + } + + @Override public void setChildren( List list ) { + if ( subdirectories == null ) { + subdirectories = new ArrayList<>(); + } + if ( !subdirectories.equals( list ) ) { + synchronized ( subdirectories ) { + subdirectories.clear(); + subdirectories.addAll( list ); + } + } + } + + @Override public String[] getPathArray() { + return getPath().split( RepositoryDirectory.DIRECTORY_SEPARATOR ); + } + + + @Override public ObjectId[] getDirectoryIDs() { + List children = this.getAllURChildrenFiles(); + ObjectId[] objectIds = new ObjectId[ children.size() ]; + for ( int i = 0; i < children.size(); i++ ) { + objectIds[ i ] = new StringObjectId( children.get( i ).getId().toString() ); + } + + return objectIds; + } + + @Override public boolean isRoot() { + return parent == null; + } + + @Override public RepositoryDirectoryInterface findRoot() { + RepositoryDirectoryInterface current = this; + RepositoryDirectoryInterface parent = null; + while ( ( parent = current.getParent() ) != null ) { + current = parent; + } + return current; + } + + @Override public void setParent( RepositoryDirectoryInterface repositoryDirectoryInterface ) { + this.parent = repositoryDirectoryInterface; + } + + @Override public RepositoryDirectoryInterface getParent() { + return parent; + } + + @Override public void setObjectId( ObjectId objectId ) { + // ignore + } + + @Override public void setName( String s ) { + // ignore + } + + + @Override + public String getPathObjectCombination( String transName ) { + if ( isRoot() ) { + return getPath() + transName; + } else { + return getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + transName; + } + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java b/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java new file mode 100644 index 0000000..889e533 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java @@ -0,0 +1,185 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.resource; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +import org.apache.commons.vfs2.FileObject; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.metastore.api.IMetaStore; + +public class ResourceUtil { + + private static Class PKG = ResourceUtil.class; // for i18n purposes, needed by Translator2!! + + /** + * Serializes the referenced resource export interface (Job, Transformation, Mapping, Step, Job Entry, etc) to a ZIP + * file. + * + * @param zipFilename + * The ZIP file to put the content in + * @param resourceExportInterface + * the interface to serialize + * @param space + * the space to use for variable replacement + * @param repository + * the repository to load objects from (or null if not used) + * @param metaStore + * the metaStore to load from + * @return The full VFS filename reference to the serialized export interface XML file in the ZIP archive. + * @throws KettleException + * in case anything goes wrong during serialization + */ + public static final TopLevelResource serializeResourceExportInterface( String zipFilename, + ResourceExportInterface resourceExportInterface, VariableSpace space, Repository repository, + IMetaStore metaStore ) throws KettleException { + return serializeResourceExportInterface( + zipFilename, resourceExportInterface, space, repository, metaStore, null, null ); + } + + /** + * Serializes the referenced resource export interface (Job, Transformation, Mapping, Step, Job Entry, etc) to a ZIP + * file. + * + * @param zipFilename + * The ZIP file to put the content in + * @param resourceExportInterface + * the interface to serialize + * @param space + * the space to use for variable replacement + * @param repository + * the repository to load objects from (or null if not used) + * @param injectXML + * The XML to inject into the resulting ZIP archive (optional, can be null) + * @param injectFilename + * The name of the file for the XML to inject in the ZIP archive (optional, can be null) + * @return The full VFS filename reference to the serialized export interface XML file in the ZIP archive. + * @throws KettleException + * in case anything goes wrong during serialization + */ + public static final TopLevelResource serializeResourceExportInterface( String zipFilename, + ResourceExportInterface resourceExportInterface, VariableSpace space, Repository repository, + IMetaStore metaStore, String injectXML, String injectFilename ) throws KettleException { + + ZipOutputStream out = null; + + try { + Map definitions = new HashMap(); + + // In case we want to add an extra pay-load to the exported ZIP file... 
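+      // Reviewer note (illustrative only, not part of the upstream file): a typical caller exports a
+      // transformation plus its execution configuration in a single call, roughly
+      //   TopLevelResource top = ResourceUtil.serializeResourceExportInterface(
+      //       "file:///tmp/export.zip",            // zipFilename     - example value
+      //       transMeta, transMeta,                // export interface + variable space (TransMeta is both)
+      //       repository, metaStore,
+      //       executionConfiguration.getXML(),     // injectXML       - optional, may be null
+      //       "__execution_configuration__.xml" ); // injectFilename  - example name
+      // The file names and the executionConfiguration variable are assumptions made for the example.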
+ // + if ( injectXML != null ) { + ResourceDefinition resourceDefinition = new ResourceDefinition( injectFilename, injectXML ); + definitions.put( injectFilename, resourceDefinition ); + } + + ResourceNamingInterface namingInterface = new SequenceResourceNaming(); + + String topLevelResource = + resourceExportInterface.exportResources( space, definitions, namingInterface, repository, metaStore ); + + if ( topLevelResource != null && !definitions.isEmpty() ) { + + // Create the ZIP file... + // + FileObject fileObject = KettleVFS.getFileObject( zipFilename, space ); + + // Store the XML in the definitions in a ZIP file... + // + out = new ZipOutputStream( KettleVFS.getOutputStream( fileObject, false ) ); + + for ( String filename : definitions.keySet() ) { + ResourceDefinition resourceDefinition = definitions.get( filename ); + + ZipEntry zipEntry = new ZipEntry( resourceDefinition.getFilename() ); + + String comment = + BaseMessages.getString( + PKG, "ResourceUtil.SerializeResourceExportInterface.ZipEntryComment.OriginatingFile", filename, + Const.NVL( resourceDefinition.getOrigin(), "-" ) ); + zipEntry.setComment( comment ); + out.putNextEntry( zipEntry ); + + out.write( resourceDefinition.getContent().getBytes() ); + out.closeEntry(); + } + String zipURL = fileObject.getName().toString(); + return new TopLevelResource( topLevelResource, zipURL, "zip:" + zipURL + "!" + topLevelResource ); + } else { + throw new KettleException( BaseMessages.getString( PKG, "ResourceUtil.Exception.NoResourcesFoundToExport" ) ); + } + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( + PKG, "ResourceUtil.Exception.ErrorSerializingExportInterface", resourceExportInterface.toString() ), e ); + } finally { + if ( out != null ) { + try { + out.close(); + } catch ( IOException e ) { + throw new KettleException( BaseMessages.getString( + PKG, "ResourceUtil.Exception.ErrorClosingZipStream", zipFilename ) ); + } + } + } + } + + public static String getExplanation( String zipFilename, String launchFile, + ResourceExportInterface resourceExportInterface ) { + + String commandString = ""; + if ( Const.isWindows() ) { + if ( resourceExportInterface instanceof TransMeta ) { + commandString += "Pan.bat /file:\""; + } else { + commandString += "Kitchen.bat /file:\""; + } + } else { + if ( resourceExportInterface instanceof TransMeta ) { + commandString += "sh pan.sh -file='"; + } else { + commandString += "sh kitchen.sh -file='"; + } + } + commandString += launchFile; + if ( Const.isWindows() ) { + commandString += "\""; + } else { + commandString += "'"; + } + + String message = + BaseMessages.getString( + PKG, "ResourceUtil.ExportResourcesExplanation", zipFilename, commandString, launchFile, Const.CR ); + return message; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java new file mode 100644 index 0000000..916b583 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java @@ -0,0 +1,5580 @@ +//CHECKSTYLE:FileLength:OFF +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2015 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.trans; + +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.Deque; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.vfs2.FileName; +import org.apache.commons.vfs2.FileObject; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.BlockingBatchingRowSet; +import org.pentaho.di.core.BlockingRowSet; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.Counter; +import org.pentaho.di.core.ExecutorInterface; +import org.pentaho.di.core.ExtensionDataInterface; +import org.pentaho.di.core.KettleEnvironment; +import org.pentaho.di.core.QueueRowSet; +import org.pentaho.di.core.Result; +import org.pentaho.di.core.ResultFile; +import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.RowSet; +import org.pentaho.di.core.SingleRowRowSet; +import org.pentaho.di.core.database.Database; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.database.DatabaseTransactionListener; +import org.pentaho.di.core.database.map.DatabaseConnectionMap; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleFileException; +import org.pentaho.di.core.exception.KettleTransException; +import org.pentaho.di.core.exception.KettleValueException; +import org.pentaho.di.core.extension.ExtensionPointHandler; +import org.pentaho.di.core.extension.KettleExtensionPoint; +import org.pentaho.di.core.logging.ChannelLogTable; +import org.pentaho.di.core.logging.HasLogChannelInterface; +import org.pentaho.di.core.logging.KettleLogStore; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.logging.LogLevel; +import org.pentaho.di.core.logging.LogStatus; +import org.pentaho.di.core.logging.LoggingHierarchy; +import org.pentaho.di.core.logging.LoggingMetric; +import org.pentaho.di.core.logging.LoggingObjectInterface; +import org.pentaho.di.core.logging.LoggingObjectType; +import 
org.pentaho.di.core.logging.LoggingRegistry; +import org.pentaho.di.core.logging.Metrics; +import org.pentaho.di.core.logging.MetricsLogTable; +import org.pentaho.di.core.logging.MetricsRegistry; +import org.pentaho.di.core.logging.PerformanceLogTable; +import org.pentaho.di.core.logging.StepLogTable; +import org.pentaho.di.core.logging.TransLogTable; +import org.pentaho.di.core.metrics.MetricsDuration; +import org.pentaho.di.core.metrics.MetricsSnapshotInterface; +import org.pentaho.di.core.metrics.MetricsUtil; +import org.pentaho.di.core.parameters.DuplicateParamException; +import org.pentaho.di.core.parameters.NamedParams; +import org.pentaho.di.core.parameters.NamedParamsDefault; +import org.pentaho.di.core.parameters.UnknownParamException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMeta; +import org.pentaho.di.core.util.EnvUtil; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.variables.Variables; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.job.DelegationListener; +import org.pentaho.di.job.Job; +import org.pentaho.di.partition.PartitionSchema; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.ObjectRevision; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.repository.RepositoryDirectoryInterface; +import org.pentaho.di.resource.ResourceUtil; +import org.pentaho.di.resource.TopLevelResource; +import org.pentaho.di.trans.cluster.TransSplitter; +import org.pentaho.di.trans.performance.StepPerformanceSnapShot; +import org.pentaho.di.trans.step.BaseStep; +import org.pentaho.di.trans.step.BaseStepData.StepExecutionStatus; +import org.pentaho.di.trans.step.RunThread; +import org.pentaho.di.trans.step.StepAdapter; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepInitThread; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepListener; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaDataCombi; +import org.pentaho.di.trans.step.StepPartitioningMeta; +import org.pentaho.di.trans.steps.mappinginput.MappingInput; +import org.pentaho.di.trans.steps.mappingoutput.MappingOutput; +import org.pentaho.di.www.AddExportServlet; +import org.pentaho.di.www.PrepareExecutionTransServlet; +import org.pentaho.di.www.RegisterTransServlet; +import org.pentaho.di.www.SlaveServerTransStatus; +import org.pentaho.di.www.SocketRepository; +import org.pentaho.di.www.StartExecutionTransServlet; +import org.pentaho.di.www.WebResult; +import org.pentaho.metastore.api.IMetaStore; + +/** + * This class represents the information and operations associated with the concept of a Transformation. It loads, + * instantiates, initializes, runs, and monitors the execution of the transformation contained in the specified + * TransInfo object. + * + * @author Matt + * @since 07-04-2003 + * + */ +public class Trans implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface, + ExecutorInterface, ExtensionDataInterface { + + /** The package name, used for internationalization of messages. */ + private static Class PKG = Trans.class; // for i18n purposes, needed by Translator2!! + + /** The replay date format. */ + public static final String REPLAY_DATE_FORMAT = "yyyy/MM/dd HH:mm:ss"; + + /** The log channel interface. 
*/ + protected LogChannelInterface log; + + /** The log level. */ + protected LogLevel logLevel = LogLevel.BASIC; + + /** The container object id. */ + protected String containerObjectId; + + /** The log commit size. */ + protected int logCommitSize = 10; + + /** The transformation metadata to execute. */ + protected TransMeta transMeta; + + /** + * The repository we are referencing. + */ + protected Repository repository; + + /** + * The MetaStore to use + */ + protected IMetaStore metaStore; + + /** + * The job that's launching this transformation. This gives us access to the whole chain, including the parent + * variables, etc. + */ + private Job parentJob; + + /** + * The transformation that is executing this transformation in case of mappings. + */ + private Trans parentTrans; + + /** The parent logging object interface (this could be a transformation or a job). */ + private LoggingObjectInterface parent; + + /** The name of the mapping step that executes this transformation in case this is a mapping. */ + private String mappingStepName; + + /** Indicates that we want to monitor the running transformation in a GUI. */ + private boolean monitored; + + /** + * Indicates that we are running in preview mode... + */ + private boolean preview; + + /** The date objects for logging information about the transformation such as start and end time, etc. */ + private Date startDate, endDate, currentDate, logDate, depDate; + + /** The job start and end date. */ + private Date jobStartDate, jobEndDate; + + /** The batch id. */ + private long batchId; + + /** + * This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the + * transformation's batch id. + */ + private long passedBatchId; + + /** The variable bindings for the transformation. */ + private VariableSpace variables = new Variables(); + + /** A list of all the row sets. */ + private List rowsets; + + /** A list of all the steps. */ + private List steps; + + /** The class number. */ + public int class_nr; + + /** + * The replayDate indicates that this transformation is a replay transformation for a transformation executed on + * replayDate. If replayDate is null, the transformation is not a replay. + */ + private Date replayDate; + + /** Constant indicating a dispatch type of 1-to-1. */ + public static final int TYPE_DISP_1_1 = 1; + + /** Constant indicating a dispatch type of 1-to-N. */ + public static final int TYPE_DISP_1_N = 2; + + /** Constant indicating a dispatch type of N-to-1. */ + public static final int TYPE_DISP_N_1 = 3; + + /** Constant indicating a dispatch type of N-to-N. */ + public static final int TYPE_DISP_N_N = 4; + + /** Constant indicating a dispatch type of N-to-M. */ + public static final int TYPE_DISP_N_M = 5; + + /** Constant indicating a transformation status of Finished. */ + public static final String STRING_FINISHED = "Finished"; + + /** Constant indicating a transformation status of Finished (with errors). */ + public static final String STRING_FINISHED_WITH_ERRORS = "Finished (with errors)"; + + /** Constant indicating a transformation status of Running. */ + public static final String STRING_RUNNING = "Running"; + + /** Constant indicating a transformation status of Paused. */ + public static final String STRING_PAUSED = "Paused"; + + /** Constant indicating a transformation status of Preparing for execution. */ + public static final String STRING_PREPARING = "Preparing executing"; + + /** Constant indicating a transformation status of Initializing. 
*/ + public static final String STRING_INITIALIZING = "Initializing"; + + /** Constant indicating a transformation status of Waiting. */ + public static final String STRING_WAITING = "Waiting"; + + /** Constant indicating a transformation status of Stopped. */ + public static final String STRING_STOPPED = "Stopped"; + + /** Constant indicating a transformation status of Halting. */ + public static final String STRING_HALTING = "Halting"; + + /** Constant specifying a filename containing XML to inject into a ZIP file created during resource export. */ + public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml"; + + /** Whether safe mode is enabled. */ + private boolean safeModeEnabled; + + /** The thread name. */ + @Deprecated + private String threadName; + + /** The transaction ID */ + private String transactionId; + + /** Whether the transformation is preparing for execution. */ + private volatile boolean preparing; + + /** Whether the transformation is initializing. */ + private boolean initializing; + + /** Whether the transformation is running. */ + private boolean running; + + /** Whether the transformation is finished. */ + private final AtomicBoolean finished; + + /** Whether the transformation is paused. */ + private AtomicBoolean paused; + + /** Whether the transformation is stopped. */ + private AtomicBoolean stopped; + + /** The number of errors that have occurred during execution of the transformation. */ + private AtomicInteger errors; + + /** Whether the transformation is ready to start. */ + private boolean readyToStart; + + /** Step performance snapshots. */ + private Map> stepPerformanceSnapShots; + + /** The step performance snapshot timer. */ + private Timer stepPerformanceSnapShotTimer; + + /** A list of listeners attached to the transformation. */ + private List transListeners; + + /** A list of stop-event listeners attached to the transformation. */ + private List transStoppedListeners; + + /** In case this transformation starts to delegate work to a local transformation or job */ + private List delegationListeners; + + /** The number of finished steps. */ + private int nrOfFinishedSteps; + + /** The number of active steps. */ + private int nrOfActiveSteps; + + /** The named parameters. */ + private NamedParams namedParams = new NamedParamsDefault(); + + /** The socket repository. */ + private SocketRepository socketRepository; + + /** The transformation log table database connection. */ + private Database transLogTableDatabaseConnection; + + /** The step performance snapshot sequence number. */ + private AtomicInteger stepPerformanceSnapshotSeqNr; + + /** The last written step performance sequence number. */ + private int lastWrittenStepPerformanceSequenceNr; + + /** The last step performance snapshot sequence number added. */ + private int lastStepPerformanceSnapshotSeqNrAdded; + + /** The active subtransformations. */ + private Map activeSubtransformations; + + /** The active subjobs */ + private Map activeSubjobs; + + /** The step performance snapshot size limit. */ + private int stepPerformanceSnapshotSizeLimit; + + /** The servlet print writer. */ + private PrintWriter servletPrintWriter; + + /** The trans finished blocking queue. 
*/ + private ArrayBlockingQueue transFinishedBlockingQueue; + + /** The name of the executing server */ + private String executingServer; + + /** The name of the executing user */ + private String executingUser; + + private Result previousResult; + + protected List resultRows; + + protected List resultFiles; + + /** The command line arguments for the transformation. */ + protected String[] arguments; + + /** + * A table of named counters. + */ + protected Hashtable counters; + + private HttpServletResponse servletResponse; + + private HttpServletRequest servletRequest; + + private Map extensionDataMap; + + private ExecutorService heartbeat = null; // this transformations's heartbeat scheduled executor + + /** + * Instantiates a new transformation. + */ + public Trans() { + finished = new AtomicBoolean( false ); + paused = new AtomicBoolean( false ); + stopped = new AtomicBoolean( false ); + + transListeners = Collections.synchronizedList( new ArrayList() ); + transStoppedListeners = Collections.synchronizedList( new ArrayList() ); + delegationListeners = new ArrayList(); + + // Get a valid transactionId in case we run database transactional. + transactionId = calculateTransactionId(); + threadName = transactionId; // / backward compatibility but deprecated! + + errors = new AtomicInteger( 0 ); + + stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 ); + lastWrittenStepPerformanceSequenceNr = 0; + + activeSubtransformations = new HashMap(); + activeSubjobs = new HashMap(); + + resultRows = new ArrayList(); + resultFiles = new ArrayList(); + counters = new Hashtable(); + + extensionDataMap = new HashMap(); + } + + /** + * Initializes a transformation from transformation meta-data defined in memory. + * + * @param transMeta + * the transformation meta-data to use. + */ + public Trans( TransMeta transMeta ) { + this( transMeta, null ); + } + + /** + * Initializes a transformation from transformation meta-data defined in memory. Also take into account the parent log + * channel interface (job or transformation) for logging lineage purposes. + * + * @param transMeta + * the transformation meta-data to use. + * @param parent + * the parent job that is executing this transformation + */ + public Trans( TransMeta transMeta, LoggingObjectInterface parent ) { + this(); + this.transMeta = transMeta; + setParent( parent ); + + initializeVariablesFrom( transMeta ); + copyParametersFrom( transMeta ); + transMeta.activateParameters(); + + // Get a valid transactionId in case we run database transactional. + transactionId = calculateTransactionId(); + threadName = transactionId; // / backward compatibility but deprecated! + } + + /** + * Sets the parent logging object. + * + * @param parent + * the new parent + */ + public void setParent( LoggingObjectInterface parent ) { + this.parent = parent; + + this.log = new LogChannel( this, parent ); + this.logLevel = log.getLogLevel(); + this.containerObjectId = log.getContainerObjectId(); + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsPreloaded" ) ); + } + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "Trans.Log.NumberOfStepsToRun", String.valueOf( transMeta.nrSteps() ), + String.valueOf( transMeta.nrTransHops() ) ) ); + } + + } + + /** + * Sets the default log commit size. 
+ */ + private void setDefaultLogCommitSize() { + String propLogCommitSize = this.getVariable( "pentaho.log.commit.size" ); + if ( propLogCommitSize != null ) { + // override the logCommit variable + try { + logCommitSize = Integer.parseInt( propLogCommitSize ); + } catch ( Exception ignored ) { + logCommitSize = 10; // ignore parsing error and default to 10 + } + } + + } + + /** + * Gets the log channel interface for the transformation. + * + * @return the log channel + * @see org.pentaho.di.core.logging.HasLogChannelInterface#getLogChannel() + */ + public LogChannelInterface getLogChannel() { + return log; + } + + /** + * Sets the log channel interface for the transformation. + * + * @param log + * the new log channel interface + */ + public void setLog( LogChannelInterface log ) { + this.log = log; + } + + /** + * Gets the name of the transformation. + * + * @return the transformation name + */ + public String getName() { + if ( transMeta == null ) { + return null; + } + + return transMeta.getName(); + } + + /** + * Instantiates a new transformation using any of the provided parameters including the variable bindings, a + * repository, a name, a repository directory name, and a filename. This is a multi-purpose method that supports + * loading a transformation from a file (if the filename is provided but not a repository object) or from a repository + * (if the repository object, repository directory name, and transformation name are specified). + * + * @param parent + * the parent variable space and named params + * @param rep + * the repository + * @param name + * the name of the transformation + * @param dirname + * the dirname the repository directory name + * @param filename + * the filename containing the transformation definition + * @throws KettleException + * if any error occurs during loading, parsing, or creation of the transformation + */ + public Trans( Parent parent, Repository rep, String name, String dirname, + String filename ) throws KettleException { + this(); + try { + if ( rep != null ) { + RepositoryDirectoryInterface repdir = rep.findDirectory( dirname ); + if ( repdir != null ) { + this.transMeta = rep.loadTransformation( name, repdir, null, false, null ); // reads last version + } else { + throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.UnableToLoadTransformation", name, + dirname ) ); + } + } else { + transMeta = new TransMeta( filename, false ); + } + + this.log = LogChannel.GENERAL; + + transMeta.initializeVariablesFrom( parent ); + initializeVariablesFrom( parent ); + // PDI-3064 do not erase parameters from meta! + // instead of this - copy parameters to actual transformation + this.copyParametersFrom( parent ); + this.activateParameters(); + + this.setDefaultLogCommitSize(); + + // Get a valid transactionId in case we run database transactional. + transactionId = calculateTransactionId(); + threadName = transactionId; // / backward compatibility but deprecated! + } catch ( KettleException e ) { + throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.UnableToOpenTransformation", name ), e ); + } + } + + /** + * Executes the transformation. This method will prepare the transformation for execution and then start all the + * threads associated with the transformation and its steps. 
+ * + * @param arguments + * the arguments + * @throws KettleException + * if the transformation could not be prepared (initialized) + */ + public void execute( String[] arguments ) throws KettleException { + prepareExecution( arguments ); + startThreads(); + } + + /** + * Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing + * and tracking the steps and hops in the transformation. + * + * @param arguments + * the arguments to use for this transformation + * @throws KettleException + * in case the transformation could not be prepared (initialized) + */ + public void prepareExecution( String[] arguments ) throws KettleException { + preparing = true; + startDate = null; + running = false; + + log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_START ); + log.snap( Metrics.METRIC_TRANSFORMATION_INIT_START ); + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationPrepareExecution.id, this ); + + checkCompatibility(); + + // Set the arguments on the transformation... + // + if ( arguments != null ) { + setArguments( arguments ); + } + + activateParameters(); + transMeta.activateParameters(); + + if ( transMeta.getName() == null ) { + if ( transMeta.getFilename() != null ) { + log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForFilename", transMeta + .getFilename() ) ); + } + } else { + log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta + .getName() ) ); + } + + if ( getArguments() != null ) { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NumberOfArgumentsDetected", String.valueOf( + getArguments().length ) ) ); + } + } + + if ( isSafeModeEnabled() ) { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName() ) ); + } + } + + if ( getReplayDate() != null ) { + SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT ); + log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ThisIsAReplayTransformation" ) + df.format( + getReplayDate() ) ); + } else { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.ThisIsNotAReplayTransformation" ) ); + } + } + + // setInternalKettleVariables(this); --> Let's not do this, when running + // without file, for example remote, it spoils the fun + + // extra check to see if the servlet print writer has some value in case + // folks want to test it locally... + // + if ( servletPrintWriter == null ) { + String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null ); + if ( encoding == null ) { + servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) ); + } else { + try { + servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out, encoding ) ); + } catch ( UnsupportedEncodingException ex ) { + servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) ); + } + } + } + + // Keep track of all the row sets and allocated steps + // + steps = new ArrayList(); + rowsets = new ArrayList(); + + List hopsteps = transMeta.getTransHopSteps( false ); + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDefferentSteps", String.valueOf( hopsteps + .size() ) ) ); + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingRowsets" ) ); + } + // First allocate all the rowsets required! 
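+    // Reviewer note (worked example, not part of the upstream file): the copy counts of the source
+    // and target step decide the dispatch type and the number of row sets allocated between them:
+    //   1 copy   -> 1 copy                       : TYPE_DISP_1_1, 1 row set
+    //   1 copy   -> 3 copies                     : TYPE_DISP_1_N, 3 row sets
+    //   3 copies -> 1 copy                       : TYPE_DISP_N_1, 3 row sets
+    //   3 copies -> 3 copies, same partitioning  : TYPE_DISP_N_N, 3 row sets
+    //   2 copies -> 3 copies, or repartitioning  : TYPE_DISP_N_M, 2 * 3 = 6 row sets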
+ // Note that a mapping doesn't receive ANY input or output rowsets... + // + for ( int i = 0; i < hopsteps.size(); i++ ) { + StepMeta thisStep = hopsteps.get( i ); + if ( thisStep.isMapping() ) { + continue; // handled and allocated by the mapping step itself. + } + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf( i ), + thisStep.getName() ) ); + } + + List nextSteps = transMeta.findNextSteps( thisStep ); + int nrTargets = nextSteps.size(); + + for ( int n = 0; n < nrTargets; n++ ) { + // What's the next step? + StepMeta nextStep = nextSteps.get( n ); + if ( nextStep.isMapping() ) { + continue; // handled and allocated by the mapping step itself. + } + + // How many times do we start the source step? + int thisCopies = thisStep.getCopies(); + + if ( thisCopies < 0 ) { + // This can only happen if a variable is used that didn't resolve to a positive integer value + // + throw new KettleException( BaseMessages.getString( PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep + .getName() ) ); + } + + // How many times do we start the target step? + int nextCopies = nextStep.getCopies(); + + // Are we re-partitioning? + boolean repartitioning; + if ( thisStep.isPartitioned() ) { + repartitioning = !thisStep.getStepPartitioningMeta().equals( nextStep.getStepPartitioningMeta() ); + } else { + repartitioning = nextStep.isPartitioned(); + } + + int nrCopies; + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.copiesInfo", String.valueOf( thisCopies ), String + .valueOf( nextCopies ) ) ); + } + int dispatchType; + if ( thisCopies == 1 && nextCopies == 1 ) { + dispatchType = TYPE_DISP_1_1; + nrCopies = 1; + } else if ( thisCopies == 1 && nextCopies > 1 ) { + dispatchType = TYPE_DISP_1_N; + nrCopies = nextCopies; + } else if ( thisCopies > 1 && nextCopies == 1 ) { + dispatchType = TYPE_DISP_N_1; + nrCopies = thisCopies; + } else if ( thisCopies == nextCopies && !repartitioning ) { + dispatchType = TYPE_DISP_N_N; + nrCopies = nextCopies; + } else { + // > 1! + dispatchType = TYPE_DISP_N_M; + nrCopies = nextCopies; + } // Allocate a rowset for each destination step + + // Allocate the rowsets + // + if ( dispatchType != TYPE_DISP_N_M ) { + for ( int c = 0; c < nrCopies; c++ ) { + RowSet rowSet; + switch ( transMeta.getTransformationType() ) { + case Normal: + // This is a temporary patch until the batching rowset has proven + // to be working in all situations. + // Currently there are stalling problems when dealing with small + // amounts of rows. 
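+                  // Reviewer note (not part of the upstream file): the batching row set is opt-in via a
+                  // JVM-level setting, e.g. starting the JVM with -DKETTLE_BATCHING_ROWSET=Y or calling
+                  //   System.setProperty( Const.KETTLE_BATCHING_ROWSET, "Y" );   // before prepareExecution()
+                  // Any value that ValueMeta.convertStringToBoolean() maps to true switches Normal
+                  // transformations from BlockingRowSet to BlockingBatchingRowSet.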
+ // + Boolean batchingRowSet = + ValueMeta.convertStringToBoolean( System.getProperty( Const.KETTLE_BATCHING_ROWSET ) ); + if ( batchingRowSet != null && batchingRowSet.booleanValue() ) { + rowSet = new BlockingBatchingRowSet( transMeta.getSizeRowset() ); + } else { + rowSet = new BlockingRowSet( transMeta.getSizeRowset() ); + } + break; + + case SerialSingleThreaded: + rowSet = new SingleRowRowSet(); + break; + + case SingleThreaded: + rowSet = new QueueRowSet(); + break; + + default: + throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() ); + } + + switch ( dispatchType ) { + case TYPE_DISP_1_1: + rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), 0 ); + break; + case TYPE_DISP_1_N: + rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), c ); + break; + case TYPE_DISP_N_1: + rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), 0 ); + break; + case TYPE_DISP_N_N: + rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), c ); + break; + default: + break; + } + rowsets.add( rowSet ); + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet + .toString() ) ); + } + } + } else { + // For each N source steps we have M target steps + // + // From each input step we go to all output steps. + // This allows maximum flexibility for re-partitioning, + // distribution... + for ( int s = 0; s < thisCopies; s++ ) { + for ( int t = 0; t < nextCopies; t++ ) { + BlockingRowSet rowSet = new BlockingRowSet( transMeta.getSizeRowset() ); + rowSet.setThreadNameFromToCopy( thisStep.getName(), s, nextStep.getName(), t ); + rowsets.add( rowSet ); + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet + .toString() ) ); + } + } + } + } + } + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatedRowsets", String.valueOf( rowsets.size() ), + String.valueOf( i ), thisStep.getName() ) + " " ); + } + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingStepsAndStepData" ) ); + } + + // Allocate the steps & the data... + // + for ( int i = 0; i < hopsteps.size(); i++ ) { + StepMeta stepMeta = hopsteps.get( i ); + String stepid = stepMeta.getStepID(); + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), + stepid ) ); + } + + // How many copies are launched of this step? + int nrCopies = stepMeta.getCopies(); + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf( nrCopies ) ) ); + } + + // At least run once... + for ( int c = 0; c < nrCopies; c++ ) { + // Make sure we haven't started it yet! + if ( !hasStepStarted( stepMeta.getName(), c ) ) { + StepMetaDataCombi combi = new StepMetaDataCombi(); + + combi.stepname = stepMeta.getName(); + combi.copy = c; + + // The meta-data + combi.stepMeta = stepMeta; + combi.meta = stepMeta.getStepMetaInterface(); + + // Allocate the step data + StepDataInterface data = combi.meta.getStepData(); + combi.data = data; + + // Allocate the step + StepInterface step = combi.meta.getStep( stepMeta, data, c, transMeta, this ); + + // Copy the variables of the transformation to the step... + // don't share. Each copy of the step has its own variables. 
+ // + step.initializeVariablesFrom( this ); + step.setUsingThreadPriorityManagment( transMeta.isUsingThreadPriorityManagment() ); + + // Pass the connected repository & metaStore to the steps runtime + // + step.setRepository( repository ); + step.setMetaStore( metaStore ); + + // If the step is partitioned, set the partitioning ID and some other + // things as well... + if ( stepMeta.isPartitioned() ) { + List partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs(); + if ( partitionIDs != null && partitionIDs.size() > 0 ) { + step.setPartitionID( partitionIDs.get( c ) ); // Pass the partition ID + // to the step + } + } + + // Save the step too + combi.step = step; + + // Pass logging level and metrics gathering down to the step level. + // / + if ( combi.step instanceof LoggingObjectInterface ) { + LogChannelInterface logChannel = combi.step.getLogChannel(); + logChannel.setLogLevel( logLevel ); + logChannel.setGatheringMetrics( log.isGatheringMetrics() ); + } + + // Add to the bunch... + steps.add( combi ); + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta + .getName(), String.valueOf( c ) ) ); + } + } + } + } + + // Now we need to verify if certain rowsets are not meant to be for error + // handling... + // Loop over the steps and for every step verify the output rowsets + // If a rowset is going to a target step in the steps error handling + // metadata, set it to the errorRowSet. + // The input rowsets are already in place, so the next step just accepts the + // rows. + // Metadata wise we need to do the same trick in TransMeta + // + for ( int s = 0; s < steps.size(); s++ ) { + StepMetaDataCombi combi = steps.get( s ); + if ( combi.stepMeta.isDoingErrorHandling() ) { + combi.step.identifyErrorOutput(); + + } + } + + // Now (optionally) write start log record! + // Make sure we synchronize appropriately to avoid duplicate batch IDs. + // + Object syncObject = this; + if ( parentJob != null ) { + syncObject = parentJob; // parallel execution in a job + } + if ( parentTrans != null ) { + syncObject = parentTrans; // multiple sub-transformations + } + synchronized ( syncObject ) { + calculateBatchIdAndDateRange(); + beginProcessing(); + } + + // Set the partition-to-rowset mapping + // + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + + StepMeta stepMeta = sid.stepMeta; + StepInterface baseStep = sid.step; + + baseStep.setPartitioned( stepMeta.isPartitioned() ); + + // Now let's take a look at the source and target relation + // + // If this source step is not partitioned, and the target step is: it + // means we need to re-partition the incoming data. + // If both steps are partitioned on the same method and schema, we don't + // need to re-partition + // If both steps are partitioned on a different method or schema, we need + // to re-partition as well. 
+ // If both steps are not partitioned, we don't need to re-partition + // + boolean isThisPartitioned = stepMeta.isPartitioned(); + PartitionSchema thisPartitionSchema = null; + if ( isThisPartitioned ) { + thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema(); + } + + boolean isNextPartitioned = false; + StepPartitioningMeta nextStepPartitioningMeta = null; + PartitionSchema nextPartitionSchema = null; + + List nextSteps = transMeta.findNextSteps( stepMeta ); + int nrNext = nextSteps.size(); + for ( int p = 0; p < nrNext; p++ ) { + StepMeta nextStep = nextSteps.get( p ); + if ( nextStep.isPartitioned() ) { + isNextPartitioned = true; + nextStepPartitioningMeta = nextStep.getStepPartitioningMeta(); + nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema(); + } + } + + baseStep.setRepartitioning( StepPartitioningMeta.PARTITIONING_METHOD_NONE ); + + // If the next step is partitioned differently, set re-partitioning, when + // running locally. + // + if ( ( !isThisPartitioned && isNextPartitioned ) || ( isThisPartitioned && isNextPartitioned + && !thisPartitionSchema.equals( nextPartitionSchema ) ) ) { + baseStep.setRepartitioning( nextStepPartitioningMeta.getMethodType() ); + } + + // For partitioning to a set of remove steps (repartitioning from a master + // to a set or remote output steps) + // + StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta(); + if ( targetStepPartitioningMeta != null ) { + baseStep.setRepartitioning( targetStepPartitioningMeta.getMethodType() ); + } + } + + preparing = false; + initializing = true; + + // Do a topology sort... Over 150 step (copies) things might be slowing down too much. + // + if ( isMonitored() && steps.size() < 150 ) { + doTopologySortOfSteps(); + } + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.InitialisingSteps", String.valueOf( steps.size() ) ) ); + } + + StepInitThread[] initThreads = new StepInitThread[steps.size()]; + Thread[] threads = new Thread[steps.size()]; + + // Initialize all the threads... + // + for ( int i = 0; i < steps.size(); i++ ) { + final StepMetaDataCombi sid = steps.get( i ); + + // Do the init code in the background! + // Init all steps at once, but ALL steps need to finish before we can + // continue properly! + // + initThreads[i] = new StepInitThread( sid, log ); + + // Put it in a separate thread! + // + threads[i] = new Thread( initThreads[i] ); + threads[i].setName( "init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")" ); + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i] ); + + threads[i].start(); + } + + for ( int i = 0; i < threads.length; i++ ) { + try { + threads[i].join(); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i] ); + } catch ( Exception ex ) { + log.logError( "Error with init thread: " + ex.getMessage(), ex.getMessage() ); + log.logError( Const.getStackTracker( ex ) ); + } + } + + initializing = false; + boolean ok = true; + + // All step are initialized now: see if there was one that didn't do it + // correctly! + // + for ( int i = 0; i < initThreads.length; i++ ) { + StepMetaDataCombi combi = initThreads[i].getCombi(); + if ( !initThreads[i].isOk() ) { + log.logError( BaseMessages.getString( PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." 
+ combi.copy ) ); + combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED ); + ok = false; + } else { + combi.data.setStatus( StepExecutionStatus.STATUS_IDLE ); + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StepInitialized", combi.stepname + "." + + combi.copy ) ); + } + } + } + + if ( !ok ) { + // Halt the other threads as well, signal end-of-the line to the outside + // world... + // Also explicitly call dispose() to clean up resources opened during + // init(); + // + for ( int i = 0; i < initThreads.length; i++ ) { + StepMetaDataCombi combi = initThreads[i].getCombi(); + + // Dispose will overwrite the status, but we set it back right after + // this. + combi.step.dispose( combi.meta, combi.data ); + + if ( initThreads[i].isOk() ) { + combi.data.setStatus( StepExecutionStatus.STATUS_HALTED ); + } else { + combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED ); + } + } + + // Just for safety, fire the trans finished listeners... + try { + fireTransFinishedListeners(); + } catch ( KettleException e ) { + // listeners produces errors + log.logError( BaseMessages.getString( PKG, "Trans.FinishListeners.Exception" ) ); + // we will not pass this exception up to prepareExecuton() entry point. + } finally { + // Flag the transformation as finished even if exception was thrown + setFinished( true ); + } + + // Pass along the log during preview. Otherwise it becomes hard to see + // what went wrong. + // + if ( preview ) { + String logText = KettleLogStore.getAppender().getBuffer( getLogChannelId(), true ).toString(); + throw new KettleException( BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR + + logText ); + } else { + throw new KettleException( BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + + Const.CR ); + } + } + + log.snap( Metrics.METRIC_TRANSFORMATION_INIT_STOP ); + + KettleEnvironment.setExecutionInformation( this, repository ); + + readyToStart = true; + } + + @SuppressWarnings( "deprecation" ) + private void checkCompatibility() { + // If we don't have a previous result and transMeta does have one, someone has been using a deprecated method. + // + if ( transMeta.getPreviousResult() != null && getPreviousResult() == null ) { + setPreviousResult( transMeta.getPreviousResult() ); + } + + // If we don't have arguments set and TransMeta has, someone has been using a deprecated method. + // + if ( transMeta.getArguments() != null && getArguments() == null ) { + setArguments( transMeta.getArguments() ); + } + } + + /** + * Starts the threads prepared by prepareThreads(). Before you start the threads, you can add RowListeners to them. + * + * @throws KettleException + * if there is a communication error with a remote output socket. + */ + public void startThreads() throws KettleException { + // Now prepare to start all the threads... + // + nrOfFinishedSteps = 0; + nrOfActiveSteps = 0; + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this ); + + fireTransStartedListeners(); + + for ( int i = 0; i < steps.size(); i++ ) { + final StepMetaDataCombi sid = steps.get( i ); + sid.step.markStart(); + sid.step.initBeforeStart(); + + // also attach a Step Listener to detect when we're done... + // + StepListener stepListener = new StepListener() { + public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) { + nrOfActiveSteps++; + if ( nrOfActiveSteps == 1 ) { + // Transformation goes from in-active to active... 
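+            // Reviewer note (illustrative, not part of the upstream file): external code can observe the
+            // same transitions by registering its own listener before the threads are started, e.g.
+            //   trans.addTransListener( new TransAdapter() {
+            //     @Override public void transFinished( Trans t ) throws KettleException {
+            //       // runs once every step copy has finished
+            //     }
+            //   } );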
+ // PDI-5229 sync added + synchronized ( transListeners ) { + for ( TransListener listener : transListeners ) { + listener.transActive( Trans.this ); + } + } + } + } + + public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) { + synchronized ( Trans.this ) { + nrOfFinishedSteps++; + + if ( nrOfFinishedSteps >= steps.size() ) { + // Set the finished flag + // + setFinished( true ); + + // Grab the performance statistics one last time (if enabled) + // + addStepPerformanceSnapShot(); + + try { + fireTransFinishedListeners(); + } catch ( Exception e ) { + step.setErrors( step.getErrors() + 1L ); + log.logError( getName() + " : " + BaseMessages.getString( PKG, + "Trans.Log.UnexpectedErrorAtTransformationEnd" ), e ); + } + } + + // If a step fails with an error, we want to kill/stop the others + // too... + // + if ( step.getErrors() > 0 ) { + + log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) ); + log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) ); + + killAllNoWait(); + } + } + } + }; + // Make sure this is called first! + // + if ( sid.step instanceof BaseStep ) { + ( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener ); + } else { + sid.step.addStepListener( stepListener ); + } + } + + if ( transMeta.isCapturingStepPerformanceSnapShots() ) { + stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 ); + stepPerformanceSnapShots = new ConcurrentHashMap>(); + + // Calculate the maximum number of snapshots to be kept in memory + // + String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() ); + if ( Const.isEmpty( limitString ) ) { + limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT ); + } + stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 ); + + // Set a timer to collect the performance data from the running threads... + // + stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() ); + TimerTask timerTask = new TimerTask() { + public void run() { + if ( !isFinished() ) { + addStepPerformanceSnapShot(); + } + } + }; + stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() ); + } + + // Now start a thread to monitor the running transformation... + // + setFinished( false ); + paused.set( false ); + stopped.set( false ); + + transFinishedBlockingQueue = new ArrayBlockingQueue( 10 ); + + TransListener transListener = new TransAdapter() { + public void transFinished( Trans trans ) { + + try { + shutdownHeartbeat( trans != null ? trans.heartbeat : null ); + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans ); + } catch ( KettleException e ) { + throw new RuntimeException( "Error calling extension point at end of transformation", e ); + } + + // First of all, stop the performance snapshot timer if there is is + // one... + // + if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) { + stepPerformanceSnapShotTimer.cancel(); + } + + setFinished( true ); + running = false; // no longer running + + log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP ); + + // If the user ran with metrics gathering enabled and a metrics logging table is configured, add another + // listener... 
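+          // Reviewer note (not part of the upstream file): both conditions are opt-in; metric snapshots
+          // are only gathered when the log channel is told to do so, e.g.
+          //   trans.getLogChannel().setGatheringMetrics( true );
+          // and they are only written out when the transformation's metrics log table has a database
+          // connection and table name configured, which is roughly what isDefined() checks below.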
+ // + MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable(); + if ( metricsLogTable.isDefined() ) { + try { + writeMetricsInformation(); + } catch ( Exception e ) { + log.logError( "Error writing metrics information", e ); + errors.incrementAndGet(); + } + } + + // Close the unique connections when running database transactionally. + // This will commit or roll back the transaction based on the result of this transformation. + // + if ( transMeta.isUsingUniqueConnections() ) { + trans.closeUniqueDatabaseConnections( getResult() ); + } + } + }; + // This should always be done first so that the other listeners achieve a clean state to start from (setFinished and + // so on) + // + transListeners.add( 0, transListener ); + + running = true; + + switch ( transMeta.getTransformationType() ) { + case Normal: + + // Now start all the threads... + // + for ( int i = 0; i < steps.size(); i++ ) { + final StepMetaDataCombi combi = steps.get( i ); + RunThread runThread = new RunThread( combi ); + Thread thread = new Thread( runThread ); + thread.setName( getName() + " - " + combi.stepname ); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi ); + // Call an extension point at the end of the step + // + combi.step.addStepListener( new StepAdapter() { + + @Override + public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) { + try { + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi ); + } catch ( KettleException e ) { + throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e ); + } + } + + } ); + + thread.start(); + } + break; + + case SerialSingleThreaded: + new Thread( new Runnable() { + public void run() { + try { + // Always disable thread priority management, it will always slow us + // down... + // + for ( StepMetaDataCombi combi : steps ) { + combi.step.setUsingThreadPriorityManagment( false ); + } + + // + // This is a single threaded version... + // + + // Sort the steps from start to finish... + // + Collections.sort( steps, new Comparator() { + public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) { + + boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta ); + if ( c1BeforeC2 ) { + return -1; + } else { + return 1; + } + } + } ); + + boolean[] stepDone = new boolean[steps.size()]; + int nrDone = 0; + while ( nrDone < steps.size() && !isStopped() ) { + for ( int i = 0; i < steps.size() && !isStopped(); i++ ) { + StepMetaDataCombi combi = steps.get( i ); + if ( !stepDone[i] ) { + // if (combi.step.canProcessOneRow() || + // !combi.step.isRunning()) { + boolean cont = combi.step.processRow( combi.meta, combi.data ); + if ( !cont ) { + stepDone[i] = true; + nrDone++; + } + // } + } + } + } + } catch ( Exception e ) { + errors.addAndGet( 1 ); + log.logError( "Error executing single threaded", e ); + } finally { + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi combi = steps.get( i ); + combi.step.dispose( combi.meta, combi.data ); + combi.step.markStop(); + } + } + } + } ).start(); + break; + + case SingleThreaded: + // Don't do anything, this needs to be handled by the transformation + // executor! 
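+        // Reviewer note (illustrative, assumes the standard Kettle helper class): a SingleThreaded
+        // transformation is driven from outside this class, roughly
+        //   SingleThreadedTransExecutor executor = new SingleThreadedTransExecutor( trans );
+        //   executor.init();
+        //   executor.oneIteration();   // invoked repeatedly by the driver to push rows through the steps
+        //   executor.dispose();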
+ // + break; + default: + break; + + } + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this ); + + heartbeat = startHeartbeat( getHeartbeatIntervalInSeconds() ); + + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocated", String.valueOf( steps + .size() ), String.valueOf( rowsets.size() ) ) ); + } + } + + /** + * Make attempt to fire all registered listeners if possible. + * + * @throws KettleException + * if any errors occur during notification + */ + protected void fireTransFinishedListeners() throws KettleException { + // PDI-5229 sync added + synchronized ( transListeners ) { + if ( transListeners.size() == 0 ) { + return; + } + // prevent Exception from one listener to block others execution + List badGuys = new ArrayList( transListeners.size() ); + for ( TransListener transListener : transListeners ) { + try { + transListener.transFinished( this ); + } catch ( KettleException e ) { + badGuys.add( e ); + } + } + // Signal for the the waitUntilFinished blocker... + transFinishedBlockingQueue.add( new Object() ); + if ( !badGuys.isEmpty() ) { + // FIFO + throw new KettleException( badGuys.get( 0 ) ); + } + } + } + + /** + * Fires the start-event listeners (if any are registered). + * + * @throws KettleException + * if any errors occur during notification + */ + protected void fireTransStartedListeners() throws KettleException { + // PDI-5229 sync added + synchronized ( transListeners ) { + for ( TransListener transListener : transListeners ) { + transListener.transStarted( this ); + } + } + } + + /** + * Adds a step performance snapshot. + */ + protected void addStepPerformanceSnapShot() { + + if ( stepPerformanceSnapShots == null ) { + return; // Race condition somewhere? + } + + boolean pausedAndNotEmpty = isPaused() && !stepPerformanceSnapShots.isEmpty(); + boolean stoppedAndNotEmpty = isStopped() && !stepPerformanceSnapShots.isEmpty(); + + if ( transMeta.isCapturingStepPerformanceSnapShots() && !pausedAndNotEmpty && !stoppedAndNotEmpty ) { + // get the statistics from the steps and keep them... + // + int seqNr = stepPerformanceSnapshotSeqNr.incrementAndGet(); + for ( int i = 0; i < steps.size(); i++ ) { + StepMeta stepMeta = steps.get( i ).stepMeta; + StepInterface step = steps.get( i ).step; + + StepPerformanceSnapShot snapShot = + new StepPerformanceSnapShot( seqNr, getBatchId(), new Date(), getName(), stepMeta.getName(), step.getCopy(), + step.getLinesRead(), step.getLinesWritten(), step.getLinesInput(), step.getLinesOutput(), step + .getLinesUpdated(), step.getLinesRejected(), step.getErrors() ); + List snapShotList = stepPerformanceSnapShots.get( step.toString() ); + StepPerformanceSnapShot previous; + if ( snapShotList == null ) { + snapShotList = new ArrayList(); + stepPerformanceSnapShots.put( step.toString(), snapShotList ); + previous = null; + } else { + previous = snapShotList.get( snapShotList.size() - 1 ); // the last one... + } + // Make the difference... + // + snapShot.diff( previous, step.rowsetInputSize(), step.rowsetOutputSize() ); + synchronized ( stepPerformanceSnapShots ) { + snapShotList.add( snapShot ); + + if ( stepPerformanceSnapshotSizeLimit > 0 && snapShotList.size() > stepPerformanceSnapshotSizeLimit ) { + snapShotList.remove( 0 ); + } + } + } + + lastStepPerformanceSnapshotSeqNrAdded = stepPerformanceSnapshotSeqNr.get(); + } + } + + /** + * This method performs any cleanup operations, typically called after the transformation has finished. 
Specifically, + * after ALL the slave transformations in a clustered run have finished. + */ + public void cleanup() { + // Close all open server sockets. + // We can only close these after all processing has been confirmed to be finished. + // + if ( steps == null ) { + return; + } + + for ( StepMetaDataCombi combi : steps ) { + combi.step.cleanup(); + } + } + + /** + * Logs a summary message for the specified step. + * + * @param si + * the step interface + */ + public void logSummary( StepInterface si ) { + log.logBasic( si.getStepname(), BaseMessages.getString( PKG, "Trans.Log.FinishedProcessing", String.valueOf( si + .getLinesInput() ), String.valueOf( si.getLinesOutput() ), String.valueOf( si.getLinesRead() ) ) + BaseMessages + .getString( PKG, "Trans.Log.FinishedProcessing2", String.valueOf( si.getLinesWritten() ), String.valueOf( si + .getLinesUpdated() ), String.valueOf( si.getErrors() ) ) ); + } + + /** + * Waits until all RunThreads have finished. + */ + public void waitUntilFinished() { + try { + if ( transFinishedBlockingQueue == null ) { + return; + } + boolean wait = true; + while ( wait ) { + wait = transFinishedBlockingQueue.poll( 1, TimeUnit.DAYS ) == null; + } + } catch ( InterruptedException e ) { + throw new RuntimeException( "Waiting for transformation to be finished interrupted!", e ); + } + } + + /** + * Gets the number of errors that have occurred during execution of the transformation. + * + * @return the number of errors + */ + public int getErrors() { + int nrErrors = errors.get(); + + if ( steps == null ) { + return nrErrors; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + if ( sid.step.getErrors() != 0L ) { + nrErrors += sid.step.getErrors(); + } + } + if ( nrErrors > 0 ) { + log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrorsDetected" ) ); + } + + return nrErrors; + } + + /** + * Gets the number of steps in the transformation that are in an end state, such as Finished, Halted, or Stopped. + * + * @return the number of ended steps + */ + public int getEnded() { + int nrEnded = 0; + + if ( steps == null ) { + return 0; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepDataInterface data = sid.data; + + if ( ( sid.step != null && !sid.step.isRunning() ) + // Should normally not be needed anymore, status is kept in data. + || data.getStatus() == StepExecutionStatus.STATUS_FINISHED || // Finished processing + data.getStatus() == StepExecutionStatus.STATUS_HALTED || // Not launching because of init error + data.getStatus() == StepExecutionStatus.STATUS_STOPPED // Stopped because of an error + ) { + nrEnded++; + } + } + + return nrEnded; + } + + /** + * Checks if the transformation is finished\. + * + * @return true if the transformation is finished, false otherwise + */ + public boolean isFinished() { + return finished.get(); + } + + private void setFinished( boolean newValue ) { + finished.set( newValue ); + } + + public boolean isFinishedOrStopped() { + return isFinished() || isStopped(); + } + + /** + * Attempts to stops all running steps and subtransformations. If all steps have finished, the transformation is + * marked as Finished. 
+ */ + public void killAll() { + if ( steps == null ) { + return; + } + + int nrStepsFinished = 0; + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + sid.step.getStepname() ); + } + + // If thr is a mapping, this is cause for an endless loop + // + while ( sid.step.isRunning() ) { + sid.step.stopAll(); + try { + Thread.sleep( 20 ); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() ); + return; + } + } + + if ( !sid.step.isRunning() ) { + nrStepsFinished++; + } + } + + if ( nrStepsFinished == steps.size() ) { + setFinished( true ); + } + } + + /** + * Asks all steps to stop but doesn't wait around for it to happen. This is a special method for use with mappings. + */ + private void killAllNoWait() { + if ( steps == null ) { + return; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface step = sid.step; + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + step.getStepname() ); + } + + step.stopAll(); + try { + Thread.sleep( 20 ); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() ); + return; + } + } + } + + /** + * Logs the execution statistics for the transformation for the specified time interval. If the total length of + * execution is supplied as the interval, then the statistics represent the average throughput (lines + * read/written/updated/rejected/etc. per second) for the entire execution. + * + * @param seconds + * the time interval (in seconds) + */ + public void printStats( int seconds ) { + log.logBasic( " " ); + if ( steps == null ) { + return; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface step = sid.step; + long proc = step.getProcessed(); + if ( seconds != 0 ) { + if ( step.getErrors() == 0 ) { + log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step + .getCopy(), String.valueOf( proc ), String.valueOf( ( proc / seconds ) ) ) ); + } else { + log.logError( BaseMessages.getString( PKG, "Trans.Log.ProcessErrorInfo", step.getStepname(), "." + step + .getCopy(), String.valueOf( step.getErrors() ), String.valueOf( proc ), String.valueOf( proc + / seconds ) ) ); + } + } else { + if ( step.getErrors() == 0 ) { + log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step + .getCopy(), String.valueOf( proc ), seconds != 0 ? String.valueOf( ( proc / seconds ) ) : "-" ) ); + } else { + log.logError( BaseMessages.getString( PKG, "Trans.Log.ProcessErrorInfo2", step.getStepname(), "." + step + .getCopy(), String.valueOf( step.getErrors() ), String.valueOf( proc ), String.valueOf( seconds ) ) ); + } + } + } + } + + /** + * Gets a representable metric of the "processed" lines of the last step. + * + * @return the number of lines processed by the last step + */ + public long getLastProcessed() { + if ( steps == null || steps.size() == 0 ) { + return 0L; + } + StepMetaDataCombi sid = steps.get( steps.size() - 1 ); + return sid.step.getProcessed(); + } + + /** + * Finds the RowSet with the specified name. 
+ * + * @param rowsetname + * the rowsetname + * @return the row set, or null if none found + */ + public RowSet findRowSet( String rowsetname ) { + // Start with the transformation. + for ( int i = 0; i < rowsets.size(); i++ ) { + // log.logDetailed("DIS: looking for RowSet ["+rowsetname+"] in nr "+i+" of "+threads.size()+" threads..."); + RowSet rs = rowsets.get( i ); + if ( rs.getName().equalsIgnoreCase( rowsetname ) ) { + return rs; + } + } + + return null; + } + + /** + * Finds the RowSet between two steps (or copies of steps). + * + * @param from + * the name of the "from" step + * @param fromcopy + * the copy number of the "from" step + * @param to + * the name of the "to" step + * @param tocopy + * the copy number of the "to" step + * @return the row set, or null if none found + */ + public RowSet findRowSet( String from, int fromcopy, String to, int tocopy ) { + // Start with the transformation. + for ( int i = 0; i < rowsets.size(); i++ ) { + RowSet rs = rowsets.get( i ); + if ( rs.getOriginStepName().equalsIgnoreCase( from ) && rs.getDestinationStepName().equalsIgnoreCase( to ) && rs + .getOriginStepCopy() == fromcopy && rs.getDestinationStepCopy() == tocopy ) { + return rs; + } + } + + return null; + } + + /** + * Checks whether the specified step (or step copy) has started. + * + * @param sname + * the step name + * @param copy + * the copy number + * @return true the specified step (or step copy) has started, false otherwise + */ + public boolean hasStepStarted( String sname, int copy ) { + // log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!"); + // log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads"); + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + boolean started = ( sid.stepname != null && sid.stepname.equalsIgnoreCase( sname ) ) && sid.copy == copy; + if ( started ) { + return true; + } + } + return false; + } + + /** + * Stops all steps from running, and alerts any registered listeners. + */ + public void stopAll() { + if ( steps == null ) { + return; + } + + // log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!"); + // log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads"); + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface rt = sid.step; + rt.setStopped( true ); + rt.resumeRunning(); + + // Cancel queries etc. by force... + StepInterface si = rt; + try { + si.stopRunning( sid.meta, sid.data ); + } catch ( Exception e ) { + log.logError( "Something went wrong while trying to stop the transformation: " + e.toString() ); + log.logError( Const.getStackTracker( e ) ); + } + + sid.data.setStatus( StepExecutionStatus.STATUS_STOPPED ); + } + + // if it is stopped it is not paused + paused.set( false ); + stopped.set( true ); + + // Fire the stopped listener... + // + synchronized ( transStoppedListeners ) { + for ( TransStoppedListener listener : transStoppedListeners ) { + listener.transStopped( this ); + } + } + } + + /** + * Gets the number of steps in this transformation. + * + * @return the number of steps + */ + public int nrSteps() { + if ( steps == null ) { + return 0; + } + return steps.size(); + } + + /** + * Gets the number of active (i.e. 
not finished) steps in this transformation + * + * @return the number of active steps + */ + public int nrActiveSteps() { + if ( steps == null ) { + return 0; + } + + int nr = 0; + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + // without also considering a step status of not finished, + // the step execution results grid shows empty while + // the transformation has steps still running. + // if ( sid.step.isRunning() ) nr++; + if ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ) { + nr++; + } + } + return nr; + } + + /** + * Checks whether the transformation steps are running lookup. + * + * @return a boolean array associated with the step list, indicating whether that step is running a lookup. + */ + public boolean[] getTransStepIsRunningLookup() { + if ( steps == null ) { + return null; + } + + boolean[] tResult = new boolean[steps.size()]; + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + tResult[i] = ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ); + } + return tResult; + } + + /** + * Checks the execution status of each step in the transformations. + * + * @return an array associated with the step list, indicating the status of that step. + */ + public StepExecutionStatus[] getTransStepExecutionStatusLookup() { + if ( steps == null ) { + return null; + } + + // we need this snapshot for the TransGridDelegate refresh method to handle the + // difference between a timed refresh and continual step status updates + int totalSteps = steps.size(); + StepExecutionStatus[] tList = new StepExecutionStatus[totalSteps]; + for ( int i = 0; i < totalSteps; i++ ) { + StepMetaDataCombi sid = steps.get( i ); + tList[i] = sid.step.getStatus(); + } + return tList; + } + + /** + * Gets the run thread for the step at the specified index. + * + * @param i + * the index of the desired step + * @return a StepInterface object corresponding to the run thread for the specified step + */ + public StepInterface getRunThread( int i ) { + if ( steps == null ) { + return null; + } + return steps.get( i ).step; + } + + /** + * Gets the run thread for the step with the specified name and copy number. + * + * @param name + * the step name + * @param copy + * the copy number + * @return a StepInterface object corresponding to the run thread for the specified step + */ + public StepInterface getRunThread( String name, int copy ) { + if ( steps == null ) { + return null; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface step = sid.step; + if ( step.getStepname().equalsIgnoreCase( name ) && step.getCopy() == copy ) { + return step; + } + } + + return null; + } + + /** + * Calculate the batch id and date range for the transformation. 
+ * + * @throws KettleTransException + * if there are any errors during calculation + */ + public void calculateBatchIdAndDateRange() throws KettleTransException { + + TransLogTable transLogTable = transMeta.getTransLogTable(); + + currentDate = new Date(); + logDate = new Date(); + startDate = Const.MIN_DATE; + endDate = currentDate; + + DatabaseMeta logConnection = transLogTable.getDatabaseMeta(); + String logTable = environmentSubstitute( transLogTable.getActualTableName() ); + String logSchema = environmentSubstitute( transLogTable.getActualSchemaName() ); + + try { + if ( logConnection != null ) { + + String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination( logSchema, logTable ); + if ( Const.isEmpty( logTable ) ) { + // It doesn't make sense to start database logging without a table + // to log to. + throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.NoLogTableDefined" ) ); + } + if ( Const.isEmpty( transMeta.getName() ) && logConnection != null && logTable != null ) { + throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.NoTransnameAvailableForLogging" ) ); + } + transLogTableDatabaseConnection = new Database( this, logConnection ); + transLogTableDatabaseConnection.shareVariablesWith( this ); + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningLogConnection", "" + logConnection ) ); + } + transLogTableDatabaseConnection.connect(); + transLogTableDatabaseConnection.setCommit( logCommitSize ); + + // See if we have to add a batch id... + // Do this first, before anything else to lock the complete table exclusively + // + if ( transLogTable.isBatchIdUsed() ) { + Long id_batch = + logConnection.getNextBatchId( transLogTableDatabaseConnection, logSchema, logTable, transLogTable + .getKeyField().getFieldName() ); + setBatchId( id_batch.longValue() ); + } + + // + // Get the date range from the logging table: from the last end_date to now. (currentDate) + // + Object[] lastr = + transLogTableDatabaseConnection.getLastLogDate( logSchemaAndTable, transMeta.getName(), false, + LogStatus.END ); + if ( lastr != null && lastr.length > 0 ) { + startDate = (Date) lastr[0]; + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StartDateFound" ) + startDate ); + } + } + + // + // OK, we have a date-range. + // However, perhaps we need to look at a table before we make a final judgment? + // + if ( transMeta.getMaxDateConnection() != null && transMeta.getMaxDateTable() != null && transMeta + .getMaxDateTable().length() > 0 && transMeta.getMaxDateField() != null && transMeta.getMaxDateField() + .length() > 0 ) { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LookingForMaxdateConnection", "" + transMeta + .getMaxDateConnection() ) ); + } + DatabaseMeta maxcon = transMeta.getMaxDateConnection(); + if ( maxcon != null ) { + Database maxdb = new Database( this, maxcon ); + maxdb.shareVariablesWith( this ); + try { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningMaximumDateConnection" ) ); + } + maxdb.connect(); + maxdb.setCommit( logCommitSize ); + + // + // Determine the endDate by looking at a field in a table... + // + String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable(); + RowMetaAndData r1 = maxdb.getOneRow( sql ); + if ( r1 != null ) { + // OK, we have a value, what's the offset? 
+ Date maxvalue = r1.getRowMeta().getDate( r1.getData(), 0 ); + if ( maxvalue != null ) { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection" ) + + r1 ); + } + endDate.setTime( (long) ( maxvalue.getTime() + ( transMeta.getMaxDateOffset() * 1000 ) ) ); + } + } else { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection" ) ); + } + } + } catch ( KettleException e ) { + throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorConnectingToDatabase", + "" + transMeta.getMaxDateConnection() ), e ); + } finally { + maxdb.disconnect(); + } + } else { + throw new KettleTransException( BaseMessages.getString( PKG, + "Trans.Exception.MaximumDateConnectionCouldNotBeFound", "" + transMeta.getMaxDateConnection() ) ); + } + } + + // Determine the last date of all dependend tables... + // Get the maximum in depdate... + if ( transMeta.nrDependencies() > 0 ) { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.CheckingForMaxDependencyDate" ) ); + } + // + // Maybe one of the tables where this transformation is dependent on has changed? + // If so we need to change the start-date! + // + depDate = Const.MIN_DATE; + Date maxdepdate = Const.MIN_DATE; + if ( lastr != null && lastr.length > 0 ) { + Date dep = (Date) lastr[1]; // #1: last depdate + if ( dep != null ) { + maxdepdate = dep; + depDate = dep; + } + } + + for ( int i = 0; i < transMeta.nrDependencies(); i++ ) { + TransDependency td = transMeta.getDependency( i ); + DatabaseMeta depcon = td.getDatabase(); + if ( depcon != null ) { + Database depdb = new Database( this, depcon ); + try { + depdb.connect(); + depdb.setCommit( logCommitSize ); + + String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename(); + RowMetaAndData r1 = depdb.getOneRow( sql ); + if ( r1 != null ) { + // OK, we have a row, get the result! + Date maxvalue = (Date) r1.getData()[0]; + if ( maxvalue != null ) { + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDateFromTable", td.getTablename(), + "." + td.getFieldname(), " = " + maxvalue.toString() ) ); + } + if ( maxvalue.getTime() > maxdepdate.getTime() ) { + maxdepdate = maxvalue; + } + } else { + throw new KettleTransException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td + .getTablename() + ".", td.getFieldname() ) ); + } + } else { + throw new KettleTransException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td + .getTablename() + ".", td.getFieldname() ) ); + } + } catch ( KettleException e ) { + throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorInDatabase", "" + td + .getDatabase() ), e ); + } finally { + depdb.disconnect(); + } + } else { + throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ConnectionCouldNotBeFound", + "" + td.getDatabase() ) ); + } + if ( log.isDetailed() ) { + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.Maxdepdate" ) + ( XMLHandler.date2string( + maxdepdate ) ) ); + } + } + + // OK, so we now have the maximum depdate; + // If it is larger, it means we have to read everything back in again. + // Maybe something has changed that we need! 
+ // + if ( maxdepdate.getTime() > depDate.getTime() ) { + depDate = maxdepdate; + startDate = Const.MIN_DATE; + } + } else { + depDate = currentDate; + } + } + + // OK, now we have a date-range. See if we need to set a maximum! + if ( transMeta.getMaxDateDifference() > 0.0 && // Do we have a difference specified? + startDate.getTime() > Const.MIN_DATE.getTime() // Is the startdate > Minimum? + ) { + // See if the end-date is larger then Start_date + DIFF? + Date maxdesired = new Date( startDate.getTime() + ( (long) transMeta.getMaxDateDifference() * 1000 ) ); + + // If this is the case: lower the end-date. Pick up the next 'region' next time around. + // We do this to limit the workload in a single update session (e.g. for large fact tables) + // + if ( endDate.compareTo( maxdesired ) > 0 ) { + endDate = maxdesired; + } + } + + } catch ( KettleException e ) { + throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorCalculatingDateRange", + logTable ), e ); + } + + // Be careful, We DO NOT close the trans log table database connection!!! + // It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions. + + } + + /** + * Begin processing. Also handle logging operations related to the start of the transformation + * + * @throws KettleTransException + * the kettle trans exception + */ + public void beginProcessing() throws KettleTransException { + TransLogTable transLogTable = transMeta.getTransLogTable(); + int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 ); + + try { + String logTable = transLogTable.getActualTableName(); + + SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT ); + log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationCanBeReplayed" ) + df.format( + currentDate ) ); + + try { + if ( transLogTableDatabaseConnection != null && !Const.isEmpty( logTable ) && !Const.isEmpty( transMeta + .getName() ) ) { + transLogTableDatabaseConnection.writeLogRecord( transLogTable, LogStatus.START, this, null ); + + // Pass in a commit to release transaction locks and to allow a user to actually see the log record. + // + if ( !transLogTableDatabaseConnection.isAutoCommit() ) { + transLogTableDatabaseConnection.commitLog( true, transLogTable ); + } + + // If we need to do periodic logging, make sure to install a timer for this... + // + if ( intervalInSeconds > 0 ) { + final Timer timer = new Timer( getName() + " - interval logging timer" ); + TimerTask timerTask = new TimerTask() { + public void run() { + try { + endProcessing(); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Exception.UnableToPerformIntervalLogging" ), e ); + // Also stop the show... + // + errors.incrementAndGet(); + stopAll(); + } + } + }; + timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 ); + + addTransListener( new TransAdapter() { + public void transFinished( Trans trans ) { + timer.cancel(); + } + } ); + } + + // Add a listener to make sure that the last record is also written when transformation finishes... 
+ // + addTransListener( new TransAdapter() { + public void transFinished( Trans trans ) throws KettleException { + try { + endProcessing(); + + lastWrittenStepPerformanceSequenceNr = + writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.END ); + + } catch ( KettleException e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e ); + } + } + } ); + + } + + // If we need to write out the step logging information, do so at the end of the transformation too... + // + StepLogTable stepLogTable = transMeta.getStepLogTable(); + if ( stepLogTable.isDefined() ) { + addTransListener( new TransAdapter() { + public void transFinished( Trans trans ) throws KettleException { + try { + writeStepLogInformation(); + } catch ( KettleException e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e ); + } + } + } ); + } + + // If we need to write the log channel hierarchy and lineage information, add a listener for that too... + // + ChannelLogTable channelLogTable = transMeta.getChannelLogTable(); + if ( channelLogTable.isDefined() ) { + addTransListener( new TransAdapter() { + public void transFinished( Trans trans ) throws KettleException { + try { + writeLogChannelInformation(); + } catch ( KettleException e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e ); + } + } + } ); + } + + // See if we need to write the step performance records at intervals too... + // + PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable(); + int perfLogInterval = Const.toInt( environmentSubstitute( performanceLogTable.getLogInterval() ), -1 ); + if ( performanceLogTable.isDefined() && perfLogInterval > 0 ) { + final Timer timer = new Timer( getName() + " - step performance log interval timer" ); + TimerTask timerTask = new TimerTask() { + public void run() { + try { + lastWrittenStepPerformanceSequenceNr = + writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING ); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, + "Trans.Exception.UnableToPerformIntervalPerformanceLogging" ), e ); + // Also stop the show... + // + errors.incrementAndGet(); + stopAll(); + } + } + }; + timer.schedule( timerTask, perfLogInterval * 1000, perfLogInterval * 1000 ); + + addTransListener( new TransAdapter() { + public void transFinished( Trans trans ) { + timer.cancel(); + } + } ); + } + } catch ( KettleException e ) { + throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", + logTable ), e ); + } finally { + // If we use interval logging, we keep the connection open for performance reasons... + // + if ( transLogTableDatabaseConnection != null && ( intervalInSeconds <= 0 ) ) { + transLogTableDatabaseConnection.disconnect(); + transLogTableDatabaseConnection = null; + } + } + } catch ( KettleException e ) { + throw new KettleTransException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToBeginProcessingTransformation" ), e ); + } + } + + /** + * Writes log channel information to a channel logging table (if one has been configured). 
+ * + * @throws KettleException + * if any errors occur during logging + */ + protected void writeLogChannelInformation() throws KettleException { + Database db = null; + ChannelLogTable channelLogTable = transMeta.getChannelLogTable(); + + // PDI-7070: If parent trans or job has the same channel logging info, don't duplicate log entries + Trans t = getParentTrans(); + if ( t != null ) { + if ( channelLogTable.equals( t.getTransMeta().getChannelLogTable() ) ) { + return; + } + } + + Job j = getParentJob(); + + if ( j != null ) { + if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) { + return; + } + } + // end PDI-7070 + + try { + db = new Database( this, channelLogTable.getDatabaseMeta() ); + db.shareVariablesWith( this ); + db.connect(); + db.setCommit( logCommitSize ); + + List loggingHierarchyList = getLoggingHierarchy(); + for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) { + db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null ); + } + + // Also time-out the log records in here... + // + db.cleanupLogRecords( channelLogTable ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e ); + } finally { + if ( !db.isAutoCommit() ) { + db.commit( true ); + } + db.disconnect(); + } + } + + /** + * Writes step information to a step logging table (if one has been configured). + * + * @throws KettleException + * if any errors occur during logging + */ + protected void writeStepLogInformation() throws KettleException { + Database db = null; + StepLogTable stepLogTable = getTransMeta().getStepLogTable(); + try { + db = createDataBase( stepLogTable.getDatabaseMeta() ); + db.shareVariablesWith( this ); + db.connect(); + db.setCommit( logCommitSize ); + + for ( StepMetaDataCombi combi : getSteps() ) { + db.writeLogRecord( stepLogTable, LogStatus.START, combi, null ); + } + + db.cleanupLogRecords( stepLogTable ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e ); + } finally { + if ( !db.isAutoCommit() ) { + db.commit( true ); + } + db.disconnect(); + } + + } + + protected Database createDataBase( DatabaseMeta meta ) { + return new Database( this, meta ); + } + + protected synchronized void writeMetricsInformation() throws KettleException { + // + List metricsList = + MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START ); + if ( !metricsList.isEmpty() ) { + System.out.println( metricsList.get( 0 ) ); + } + + metricsList = + MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START ); + if ( !metricsList.isEmpty() ) { + System.out.println( metricsList.get( 0 ) ); + } + + long total = 0; + metricsList = + MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START ); + if ( metricsList != null ) { + for ( MetricsDuration duration : metricsList ) { + total += duration.getDuration(); + System.out.println( " - " + duration.toString() + " Total=" + total ); + } + } + + Database db = null; + MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable(); + try { + db = new Database( this, metricsLogTable.getDatabaseMeta() ); + db.shareVariablesWith( this ); + db.connect(); + db.setCommit( logCommitSize ); + + List logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() ); + 
for ( String logChannelId : logChannelIds ) { + Deque snapshotList = + MetricsRegistry.getInstance().getSnapshotLists().get( logChannelId ); + if ( snapshotList != null ) { + Iterator iterator = snapshotList.iterator(); + while ( iterator.hasNext() ) { + MetricsSnapshotInterface snapshot = iterator.next(); + db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null ); + } + } + + Map snapshotMap = + MetricsRegistry.getInstance().getSnapshotMaps().get( logChannelId ); + if ( snapshotMap != null ) { + synchronized ( snapshotMap ) { + Iterator iterator = snapshotMap.values().iterator(); + while ( iterator.hasNext() ) { + MetricsSnapshotInterface snapshot = iterator.next(); + db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null ); + } + } + } + } + + // Also time-out the log records in here... + // + db.cleanupLogRecords( metricsLogTable ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, + "Trans.Exception.UnableToWriteMetricsInformationToLogTable" ), e ); + } finally { + if ( !db.isAutoCommit() ) { + db.commit( true ); + } + db.disconnect(); + } + } + + /** + * Gets the result of the transformation. The Result object contains such measures as the number of errors, number of + * lines read/written/input/output/updated/rejected, etc. + * + * @return the Result object containing resulting measures from execution of the transformation + */ + public Result getResult() { + if ( steps == null ) { + return null; + } + + Result result = new Result(); + result.setNrErrors( errors.longValue() ); + result.setResult( errors.longValue() == 0 ); + TransLogTable transLogTable = transMeta.getTransLogTable(); + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface step = sid.step; + + result.setNrErrors( result.getNrErrors() + sid.step.getErrors() ); + result.getResultFiles().putAll( step.getResultFiles() ); + + if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_READ ) ) ) { + result.setNrLinesRead( result.getNrLinesRead() + step.getLinesRead() ); + } + if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_INPUT ) ) ) { + result.setNrLinesInput( result.getNrLinesInput() + step.getLinesInput() ); + } + if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_WRITTEN ) ) ) { + result.setNrLinesWritten( result.getNrLinesWritten() + step.getLinesWritten() ); + } + if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_OUTPUT ) ) ) { + result.setNrLinesOutput( result.getNrLinesOutput() + step.getLinesOutput() ); + } + if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_UPDATED ) ) ) { + result.setNrLinesUpdated( result.getNrLinesUpdated() + step.getLinesUpdated() ); + } + if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_REJECTED ) ) ) { + result.setNrLinesRejected( result.getNrLinesRejected() + step.getLinesRejected() ); + } + } + + result.setRows( resultRows ); + if ( !Const.isEmpty( resultFiles ) ) { + result.setResultFiles( new HashMap() ); + for ( ResultFile resultFile : resultFiles ) { + result.getResultFiles().put( resultFile.toString(), resultFile ); + } + } + result.setStopped( isStopped() ); + result.setLogChannelId( log.getLogChannelId() ); + + return result; + } + + /** + * End processing. 
Also handle any logging operations associated with the end of a transformation + * + * @return true if all end processing is successful, false otherwise + * @throws KettleException + * if any errors occur during processing + */ + private synchronized boolean endProcessing() throws KettleException { + LogStatus status; + + if ( isFinished() ) { + if ( isStopped() ) { + status = LogStatus.STOP; + } else { + status = LogStatus.END; + } + } else if ( isPaused() ) { + status = LogStatus.PAUSED; + } else { + status = LogStatus.RUNNING; + } + + TransLogTable transLogTable = transMeta.getTransLogTable(); + int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 ); + + logDate = new Date(); + + // OK, we have some logging to do... + // + DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta(); + String logTable = transMeta.getTransLogTable().getActualTableName(); + if ( logcon != null ) { + Database ldb = null; + + try { + // Let's not reconnect/disconnect all the time for performance reasons! + // + if ( transLogTableDatabaseConnection == null ) { + ldb = new Database( this, logcon ); + ldb.shareVariablesWith( this ); + ldb.connect(); + ldb.setCommit( logCommitSize ); + transLogTableDatabaseConnection = ldb; + } else { + ldb = transLogTableDatabaseConnection; + } + + // Write to the standard transformation log table... + // + if ( !Const.isEmpty( logTable ) ) { + ldb.writeLogRecord( transLogTable, status, this, null ); + } + + // Also time-out the log records in here... + // + if ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) { + ldb.cleanupLogRecords( transLogTable ); + } + + // Commit the operations to prevent locking issues + // + if ( !ldb.isAutoCommit() ) { + ldb.commitLog( true, transMeta.getTransLogTable() ); + } + } catch ( KettleDatabaseException e ) { + // PDI-9790 error write to log db is transaction error + log.logError( BaseMessages.getString( PKG, "Database.Error.WriteLogTable", logTable ), e ); + errors.incrementAndGet(); + // end PDI-9790 + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", + transMeta.getTransLogTable().getActualTableName() ), e ); + } finally { + if ( intervalInSeconds <= 0 || ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) ) { + ldb.disconnect(); + transLogTableDatabaseConnection = null; // disconnected + } + } + } + return true; + } + + /** + * Write step performance log records. + * + * @param startSequenceNr + * the start sequence numberr + * @param status + * the logging status. If this is End, perform cleanup + * @return the new sequence number + * @throws KettleException + * if any errors occur during logging + */ + private int writeStepPerformanceLogRecords( int startSequenceNr, LogStatus status ) throws KettleException { + int lastSeqNr = 0; + Database ldb = null; + PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable(); + + if ( !performanceLogTable.isDefined() || !transMeta.isCapturingStepPerformanceSnapShots() + || stepPerformanceSnapShots == null || stepPerformanceSnapShots.isEmpty() ) { + return 0; // nothing to do here! + } + + try { + ldb = new Database( this, performanceLogTable.getDatabaseMeta() ); + ldb.shareVariablesWith( this ); + ldb.connect(); + ldb.setCommit( logCommitSize ); + + // Write to the step performance log table... 
+      //
+      RowMetaInterface rowMeta = performanceLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
+      ldb.prepareInsert( rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable.getActualTableName() );
+
+      synchronized ( stepPerformanceSnapShots ) {
+        Iterator<List<StepPerformanceSnapShot>> iterator = stepPerformanceSnapShots.values().iterator();
+        while ( iterator.hasNext() ) {
+          List<StepPerformanceSnapShot> snapshots = iterator.next();
+          synchronized ( snapshots ) {
+            Iterator<StepPerformanceSnapShot> snapshotsIterator = snapshots.iterator();
+            while ( snapshotsIterator.hasNext() ) {
+              StepPerformanceSnapShot snapshot = snapshotsIterator.next();
+              if ( snapshot.getSeqNr() >= startSequenceNr && snapshot
+                .getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded ) {
+
+                RowMetaAndData row = performanceLogTable.getLogRecord( LogStatus.START, snapshot, null );
+
+                ldb.setValuesInsert( row.getRowMeta(), row.getData() );
+                ldb.insertRow( true );
+              }
+              lastSeqNr = snapshot.getSeqNr();
+            }
+          }
+        }
+      }
+
+      ldb.insertFinished( true );
+
+      // Finally, see if the log table needs cleaning up...
+      //
+      if ( status.equals( LogStatus.END ) ) {
+        ldb.cleanupLogRecords( performanceLogTable );
+      }
+
+    } catch ( Exception e ) {
+      throw new KettleException( BaseMessages.getString( PKG,
+        "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable" ), e );
+    } finally {
+      if ( ldb != null ) {
+        ldb.disconnect();
+      }
+    }
+
+    return lastSeqNr + 1;
+  }
+
+  /**
+   * Close unique database connections. If there are errors in the Result, perform a rollback
+   *
+   * @param result
+   *          the result of the transformation execution
+   */
+  private void closeUniqueDatabaseConnections( Result result ) {
+
+    // Don't close any connections if the parent job is using the same transaction
+    //
+    if ( parentJob != null && transactionId != null && parentJob.getTransactionId() != null && transactionId.equals(
+      parentJob.getTransactionId() ) ) {
+      return;
+    }
+
+    // Don't close any connections if the parent transformation is using the same transaction
+    //
+    if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() && transactionId != null
+      && parentTrans.getTransactionId() != null && transactionId.equals( parentTrans.getTransactionId() ) ) {
+      return;
+    }
+
+    // First we get all the database connections ...
+    //
+    DatabaseConnectionMap map = DatabaseConnectionMap.getInstance();
+    synchronized ( map ) {
+      List<Database> databaseList = new ArrayList<Database>( map.getMap().values() );
+      for ( Database database : databaseList ) {
+        if ( database.getConnectionGroup().equals( getTransactionId() ) ) {
+          try {
+            // This database connection belongs to this transformation.
+            // Let's roll it back if there is an error...
+ // + if ( result.getNrErrors() > 0 ) { + try { + database.rollback( true ); + log.logBasic( BaseMessages.getString( PKG, "Trans.Exception.TransactionsRolledBackOnConnection", + database.toString() ) ); + } catch ( Exception e ) { + throw new KettleDatabaseException( BaseMessages.getString( PKG, + "Trans.Exception.ErrorRollingBackUniqueConnection", database.toString() ), e ); + } + } else { + try { + database.commit( true ); + log.logBasic( BaseMessages.getString( PKG, "Trans.Exception.TransactionsCommittedOnConnection", database + .toString() ) ); + } catch ( Exception e ) { + throw new KettleDatabaseException( BaseMessages.getString( PKG, + "Trans.Exception.ErrorCommittingUniqueConnection", database.toString() ), e ); + } + } + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", + database.toString() ), e ); + result.setNrErrors( result.getNrErrors() + 1 ); + } finally { + try { + // This database connection belongs to this transformation. + database.closeConnectionOnly(); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", + database.toString() ), e ); + result.setNrErrors( result.getNrErrors() + 1 ); + } finally { + // Remove the database from the list... + // + map.removeConnection( database.getConnectionGroup(), database.getPartitionId(), database ); + } + } + } + } + + // Who else needs to be informed of the rollback or commit? + // + List transactionListeners = map.getTransactionListeners( getTransactionId() ); + if ( result.getNrErrors() > 0 ) { + for ( DatabaseTransactionListener listener : transactionListeners ) { + try { + listener.rollback(); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerRollback" ), + e ); + result.setNrErrors( result.getNrErrors() + 1 ); + } + } + } else { + for ( DatabaseTransactionListener listener : transactionListeners ) { + try { + listener.commit(); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerCommit" ), e ); + result.setNrErrors( result.getNrErrors() + 1 ); + } + } + } + + } + } + + /** + * Find the run thread for the step with the specified name. + * + * @param stepname + * the step name + * @return a StepInterface object corresponding to the run thread for the specified step + */ + public StepInterface findRunThread( String stepname ) { + if ( steps == null ) { + return null; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface step = sid.step; + if ( step.getStepname().equalsIgnoreCase( stepname ) ) { + return step; + } + } + return null; + } + + /** + * Find the base steps for the step with the specified name. 
+ * + * @param stepname + * the step name + * @return the list of base steps for the specified step + */ + public List findBaseSteps( String stepname ) { + List baseSteps = new ArrayList(); + + if ( steps == null ) { + return baseSteps; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface stepInterface = sid.step; + if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) { + baseSteps.add( stepInterface ); + } + } + return baseSteps; + } + + /** + * Find the executing step copy for the step with the specified name and copy number + * + * @param stepname + * the step name + * @param copynr + * @return the executing step found or null if no copy could be found. + */ + public StepInterface findStepInterface( String stepname, int copyNr ) { + if ( steps == null ) { + return null; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface stepInterface = sid.step; + if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) && sid.copy == copyNr ) { + return stepInterface; + } + } + return null; + } + + /** + * Find the available executing step copies for the step with the specified name + * + * @param stepname + * the step name + * @param copynr + * @return the list of executing step copies found or null if no steps are available yet (incorrect usage) + */ + public List findStepInterfaces( String stepname ) { + if ( steps == null ) { + return null; + } + + List list = new ArrayList(); + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface stepInterface = sid.step; + if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) { + list.add( stepInterface ); + } + } + return list; + } + + /** + * Find the data interface for the step with the specified name. + * + * @param name + * the step name + * @return the step data interface + */ + public StepDataInterface findDataInterface( String name ) { + if ( steps == null ) { + return null; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + StepInterface rt = sid.step; + if ( rt.getStepname().equalsIgnoreCase( name ) ) { + return sid.data; + } + } + return null; + } + + /** + * Gets the start date/time object for the transformation. + * + * @return Returns the startDate. + */ + public Date getStartDate() { + return startDate; + } + + /** + * Gets the end date/time object for the transformation. + * + * @return Returns the endDate. + */ + public Date getEndDate() { + return endDate; + } + + /** + * Checks whether the running transformation is being monitored. + * + * @return true the running transformation is being monitored, false otherwise + */ + public boolean isMonitored() { + return monitored; + } + + /** + * Sets whether the running transformation should be monitored. + * + * @param monitored + * true if the running transformation should be monitored, false otherwise + */ + public void setMonitored( boolean monitored ) { + this.monitored = monitored; + } + + /** + * Gets the meta-data for the transformation. + * + * @return Returns the transformation meta-data + */ + public TransMeta getTransMeta() { + return transMeta; + } + + /** + * Sets the meta-data for the transformation. + * + * @param transMeta + * The transformation meta-data to set. + */ + public void setTransMeta( TransMeta transMeta ) { + this.transMeta = transMeta; + } + + /** + * Gets the current date/time object. 
+ * + * @return the current date + */ + public Date getCurrentDate() { + return currentDate; + } + + /** + * Gets the dependency date for the transformation. A transformation can have a list of dependency fields. If any of + * these fields have a maximum date higher than the dependency date of the last run, the date range is set to to (-oo, + * now). The use-case is the incremental population of Slowly Changing Dimensions (SCD). + * + * @return Returns the dependency date + */ + public Date getDepDate() { + return depDate; + } + + /** + * Gets the date the transformation was logged. + * + * @return the log date + */ + public Date getLogDate() { + return logDate; + } + + /** + * Gets the rowsets for the transformation. + * + * @return a list of rowsets + */ + public List getRowsets() { + return rowsets; + } + + /** + * Gets a list of steps in the transformation. + * + * @return a list of the steps in the transformation + */ + public List getSteps() { + return steps; + } + + /** + * Gets a string representation of the transformation. + * + * @return the string representation of the transformation + * @see java.lang.Object#toString() + */ + public String toString() { + if ( transMeta == null || transMeta.getName() == null ) { + return getClass().getSimpleName(); + } + + // See if there is a parent transformation. If so, print the name of the parent here as well... + // + StringBuffer string = new StringBuffer(); + + // If we're running as a mapping, we get a reference to the calling (parent) transformation as well... + // + if ( getParentTrans() != null ) { + string.append( '[' ).append( getParentTrans().toString() ).append( ']' ).append( '.' ); + } + + // When we run a mapping we also set a mapping step name in there... + // + if ( !Const.isEmpty( mappingStepName ) ) { + string.append( '[' ).append( mappingStepName ).append( ']' ).append( '.' ); + } + + string.append( transMeta.getName() ); + + return string.toString(); + } + + /** + * Gets the mapping inputs for each step in the transformation. + * + * @return an array of MappingInputs + */ + public MappingInput[] findMappingInput() { + if ( steps == null ) { + return null; + } + + List list = new ArrayList(); + + // Look in threads and find the MappingInput step thread... + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi smdc = steps.get( i ); + StepInterface step = smdc.step; + if ( step.getStepID().equalsIgnoreCase( "MappingInput" ) ) { + list.add( (MappingInput) step ); + } + } + return list.toArray( new MappingInput[list.size()] ); + } + + /** + * Gets the mapping outputs for each step in the transformation. + * + * @return an array of MappingOutputs + */ + public MappingOutput[] findMappingOutput() { + List list = new ArrayList(); + + if ( steps != null ) { + // Look in threads and find the MappingInput step thread... + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi smdc = steps.get( i ); + StepInterface step = smdc.step; + if ( step.getStepID().equalsIgnoreCase( "MappingOutput" ) ) { + list.add( (MappingOutput) step ); + } + } + } + return list.toArray( new MappingOutput[list.size()] ); + } + + /** + * Find the StepInterface (thread) by looking it up using the name. + * + * @param stepname + * The name of the step to look for + * @param copy + * the copy number of the step to look for + * @return the StepInterface or null if nothing was found. + */ + public StepInterface getStepInterface( String stepname, int copy ) { + if ( steps == null ) { + return null; + } + + // Now start all the threads... 
+ for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + if ( sid.stepname.equalsIgnoreCase( stepname ) && sid.copy == copy ) { + return sid.step; + } + } + + return null; + } + + /** + * Gets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run + * again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line + * numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are + * passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors + * (bad dates, chars in numbers, etc), you simply send the document back to the source (the user/departement that + * created it probably) and when you get it back, re-run the last transformation. + * + * @return the replay date + */ + public Date getReplayDate() { + return replayDate; + } + + /** + * Sets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run + * again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line + * numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are + * passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors + * (bad dates, chars in numbers, etc), you simply send the document back to the source (the user/departement that + * created it probably) and when you get it back, re-run the last transformation. + * + * @param replayDate + * the new replay date + */ + public void setReplayDate( Date replayDate ) { + this.replayDate = replayDate; + } + + /** + * Turn on safe mode during running: the transformation will run slower but with more checking enabled. + * + * @param safeModeEnabled + * true for safe mode + */ + public void setSafeModeEnabled( boolean safeModeEnabled ) { + this.safeModeEnabled = safeModeEnabled; + } + + /** + * Checks whether safe mode is enabled. + * + * @return Returns true if the safe mode is enabled: the transformation will run slower but with more checking enabled + */ + public boolean isSafeModeEnabled() { + return safeModeEnabled; + } + + /** + * This adds a row producer to the transformation that just got set up. It is preferable to run this BEFORE execute() + * but after prepareExecution() + * + * @param stepname + * The step to produce rows for + * @param copynr + * The copynr of the step to produce row for (normally 0 unless you have multiple copies running) + * @return the row producer + * @throws KettleException + * in case the thread/step to produce rows for could not be found. + * @see Trans#execute(String[]) + * @see Trans#prepareExecution(String[]) + */ + public RowProducer addRowProducer( String stepname, int copynr ) throws KettleException { + StepInterface stepInterface = getStepInterface( stepname, copynr ); + if ( stepInterface == null ) { + throw new KettleException( "Unable to find thread with name " + stepname + " and copy number " + copynr ); + } + + // We are going to add an extra RowSet to this stepInterface. 
+ RowSet rowSet; + switch ( transMeta.getTransformationType() ) { + case Normal: + rowSet = new BlockingRowSet( transMeta.getSizeRowset() ); + break; + case SerialSingleThreaded: + rowSet = new SingleRowRowSet(); + break; + case SingleThreaded: + rowSet = new QueueRowSet(); + break; + default: + throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() ); + } + + // Add this rowset to the list of active rowsets for the selected step + stepInterface.getInputRowSets().add( rowSet ); + + return new RowProducer( stepInterface, rowSet ); + } + + /** + * Gets the parent job, or null if there is no parent. + * + * @return the parent job, or null if there is no parent + */ + public Job getParentJob() { + return parentJob; + } + + /** + * Sets the parent job for the transformation. + * + * @param parentJob + * The parent job to set + */ + public void setParentJob( Job parentJob ) { + this.logLevel = parentJob.getLogLevel(); + this.log.setLogLevel( logLevel ); + this.parentJob = parentJob; + + transactionId = calculateTransactionId(); + } + + /** + * Finds the StepDataInterface (currently) associated with the specified step. + * + * @param stepname + * The name of the step to look for + * @param stepcopy + * The copy number (0 based) of the step + * @return The StepDataInterface or null if non found. + */ + public StepDataInterface getStepDataInterface( String stepname, int stepcopy ) { + if ( steps == null ) { + return null; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + if ( sid.stepname.equals( stepname ) && sid.copy == stepcopy ) { + return sid.data; + } + } + return null; + } + + /** + * Checks whether the transformation has any steps that are halted. + * + * @return true if one or more steps are halted, false otherwise + */ + public boolean hasHaltedSteps() { + // not yet 100% sure of this, if there are no steps... or none halted? + if ( steps == null ) { + return false; + } + + for ( int i = 0; i < steps.size(); i++ ) { + StepMetaDataCombi sid = steps.get( i ); + if ( sid.data.getStatus() == StepExecutionStatus.STATUS_HALTED ) { + return true; + } + } + return false; + } + + /** + * Gets the job start date. + * + * @return the job start date + */ + public Date getJobStartDate() { + return jobStartDate; + } + + /** + * Gets the job end date. + * + * @return the job end date + */ + public Date getJobEndDate() { + return jobEndDate; + } + + /** + * Sets the job end date. + * + * @param jobEndDate + * the jobEndDate to set + */ + public void setJobEndDate( Date jobEndDate ) { + this.jobEndDate = jobEndDate; + } + + /** + * Sets the job start date. + * + * @param jobStartDate + * the jobStartDate to set + */ + public void setJobStartDate( Date jobStartDate ) { + this.jobStartDate = jobStartDate; + } + + /** + * Get the batch ID that is passed from the parent job to the transformation. If nothing is passed, it's the + * transformation's batch ID + * + * @return the parent job's batch ID, or the transformation's batch ID if there is no parent job + */ + public long getPassedBatchId() { + return passedBatchId; + } + + /** + * Sets the passed batch ID of the transformation from the batch ID of the parent job. + * + * @param jobBatchId + * the jobBatchId to set + */ + public void setPassedBatchId( long jobBatchId ) { + this.passedBatchId = jobBatchId; + } + + /** + * Gets the batch ID of the transformation. 
+ * + * @return the batch ID of the transformation + */ + public long getBatchId() { + return batchId; + } + + /** + * Sets the batch ID of the transformation. + * + * @param batchId + * the batch ID to set + */ + public void setBatchId( long batchId ) { + this.batchId = batchId; + } + + /** + * Gets the name of the thread that contains the transformation. + * + * @deprecated please use getTransactionId() instead + * @return the thread name + */ + @Deprecated + public String getThreadName() { + return threadName; + } + + /** + * Sets the thread name for the transformation. + * + * @deprecated please use setTransactionId() instead + * @param threadName + * the thread name + */ + @Deprecated + public void setThreadName( String threadName ) { + this.threadName = threadName; + } + + /** + * Gets the status of the transformation (Halting, Finished, Paused, etc.) + * + * @return the status of the transformation + */ + public String getStatus() { + String message; + + if ( running ) { + if ( isStopped() ) { + message = STRING_HALTING; + } else { + if ( isFinished() ) { + message = STRING_FINISHED; + if ( getResult().getNrErrors() > 0 ) { + message += " (with errors)"; + } + } else if ( isPaused() ) { + message = STRING_PAUSED; + } else { + message = STRING_RUNNING; + } + } + } else if ( isStopped() ) { + message = STRING_STOPPED; + } else if ( preparing ) { + message = STRING_PREPARING; + } else if ( initializing ) { + message = STRING_INITIALIZING; + } else { + message = STRING_WAITING; + } + + return message; + } + + /** + * Checks whether the transformation is initializing. + * + * @return true if the transformation is initializing, false otherwise + */ + public boolean isInitializing() { + return initializing; + } + + /** + * Sets whether the transformation is initializing. + * + * @param initializing + * true if the transformation is initializing, false otherwise + */ + public void setInitializing( boolean initializing ) { + this.initializing = initializing; + } + + /** + * Checks whether the transformation is preparing for execution. + * + * @return true if the transformation is preparing for execution, false otherwise + */ + public boolean isPreparing() { + return preparing; + } + + /** + * Sets whether the transformation is preparing for execution. + * + * @param preparing + * true if the transformation is preparing for execution, false otherwise + */ + public void setPreparing( boolean preparing ) { + this.preparing = preparing; + } + + /** + * Checks whether the transformation is running. + * + * @return true if the transformation is running, false otherwise + */ + public boolean isRunning() { + return running; + } + + /** + * Sets whether the transformation is running. + * + * @param running + * true if the transformation is running, false otherwise + */ + public void setRunning( boolean running ) { + this.running = running; + } + + /** + * Execute the transformation in a clustered fashion. 
The transformation steps are split and collected in a + * TransSplitter object + * + * @param transMeta + * the transformation's meta-data + * @param executionConfiguration + * the execution configuration + * @return the transformation splitter object + * @throws KettleException + * the kettle exception + */ + public static final TransSplitter executeClustered( final TransMeta transMeta, + final TransExecutionConfiguration executionConfiguration ) throws KettleException { + if ( Const.isEmpty( transMeta.getName() ) ) { + throw new KettleException( "The transformation needs a name to uniquely identify it by on the remote server." ); + } + + TransSplitter transSplitter = new TransSplitter( transMeta ); + transSplitter.splitOriginalTransformation(); + + // Pass the clustered run ID to allow for parallel execution of clustered transformations + // + executionConfiguration.getVariables().put( Const.INTERNAL_VARIABLE_CLUSTER_RUN_ID, transSplitter + .getClusteredRunId() ); + + executeClustered( transSplitter, executionConfiguration ); + return transSplitter; + } + + /** + * Executes an existing TransSplitter, with the transformation already split. + * + * @param transSplitter + * the trans splitter + * @param executionConfiguration + * the execution configuration + * @throws KettleException + * the kettle exception + * @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate + */ + public static final void executeClustered( final TransSplitter transSplitter, + final TransExecutionConfiguration executionConfiguration ) throws KettleException { + try { + // Send the transformations to the servers... + // + // First the master and the slaves... + // + TransMeta master = transSplitter.getMaster(); + final SlaveServer[] slaves = transSplitter.getSlaveTargets(); + final Thread[] threads = new Thread[slaves.length]; + final Throwable[] errors = new Throwable[slaves.length]; + + // Keep track of the various Carte object IDs + // + final Map carteObjectMap = transSplitter.getCarteObjectMap(); + + // + // Send them all on their way... + // + SlaveServer masterServer = null; + List masterSteps = master.getTransHopSteps( false ); + if ( masterSteps.size() > 0 ) { // If there is something that needs to be done on the master... + masterServer = transSplitter.getMasterServer(); + if ( executionConfiguration.isClusterPosting() ) { + TransConfiguration transConfiguration = new TransConfiguration( master, executionConfiguration ); + Map variables = transConfiguration.getTransExecutionConfiguration().getVariables(); + variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) ); + variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y" ); + + // Parameters override the variables but they need to pass over the configuration too... 
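+ // Resolution order for each declared parameter on the original transformation: an explicitly set
+ // parameter value first, then the parameter default, then a variable with the same name.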
+ // + Map params = transConfiguration.getTransExecutionConfiguration().getParams(); + TransMeta ot = transSplitter.getOriginalTransformation(); + for ( String param : ot.listParameters() ) { + String value = + Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot.getVariable( + param ) ) ); + params.put( param, value ); + } + + String masterReply = + masterServer.sendXML( transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y" ); + WebResult webResult = WebResult.fromXMLString( masterReply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "An error occurred sending the master transformation: " + webResult + .getMessage() ); + } + carteObjectMap.put( master, webResult.getId() ); + } + } + + // Then the slaves... + // These are started in a background thread. + // + for ( int i = 0; i < slaves.length; i++ ) { + final int index = i; + + final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] ); + + if ( executionConfiguration.isClusterPosting() ) { + Runnable runnable = new Runnable() { + public void run() { + try { + // Create a copy for local use... We get race-conditions otherwise... + // + TransExecutionConfiguration slaveTransExecutionConfiguration = + (TransExecutionConfiguration) executionConfiguration.clone(); + TransConfiguration transConfiguration = + new TransConfiguration( slaveTrans, slaveTransExecutionConfiguration ); + + Map variables = slaveTransExecutionConfiguration.getVariables(); + variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString( index ) ); + variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName() ); + variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) ); + variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N" ); + + // Parameters override the variables but they need to pass over the configuration too... + // + Map params = slaveTransExecutionConfiguration.getParams(); + TransMeta ot = transSplitter.getOriginalTransformation(); + for ( String param : ot.listParameters() ) { + String value = + Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot + .getVariable( param ) ) ); + params.put( param, value ); + } + + String slaveReply = + slaves[index].sendXML( transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y" ); + WebResult webResult = WebResult.fromXMLString( slaveReply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "An error occurred sending a slave transformation: " + webResult + .getMessage() ); + } + carteObjectMap.put( slaveTrans, webResult.getId() ); + } catch ( Throwable t ) { + errors[index] = t; + } + } + }; + threads[i] = new Thread( runnable ); + } + } + + // Start the slaves + for ( int i = 0; i < threads.length; i++ ) { + if ( threads[i] != null ) { + threads[i].start(); + } + } + + // Wait until the slaves report back... + // Sending the XML over is the heaviest part + // Later we can do the others as well... + // + for ( int i = 0; i < threads.length; i++ ) { + if ( threads[i] != null ) { + threads[i].join(); + if ( errors[i] != null ) { + throw new KettleException( errors[i] ); + } + } + } + + if ( executionConfiguration.isClusterPosting() ) { + if ( executionConfiguration.isClusterPreparing() ) { + // Prepare the master... + if ( masterSteps.size() > 0 ) { // If there is something that needs to be done on the master... 
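+ // Look up the Carte object ID that was stored when the master transformation was registered,
+ // so the prepare request below targets that exact registration on the master server.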
+ String carteObjectId = carteObjectMap.get( master ); + String masterReply = + masterServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + master.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); + WebResult webResult = WebResult.fromXMLString( masterReply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( + "An error occurred while preparing the execution of the master transformation: " + webResult + .getMessage() ); + } + } + + // Prepare the slaves + // WG: Should these be threaded like the above initialization? + for ( int i = 0; i < slaves.length; i++ ) { + TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] ); + String carteObjectId = carteObjectMap.get( slaveTrans ); + String slaveReply = + slaves[i].execService( PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + slaveTrans.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); + WebResult webResult = WebResult.fromXMLString( slaveReply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "An error occurred while preparing the execution of a slave transformation: " + + webResult.getMessage() ); + } + } + } + + if ( executionConfiguration.isClusterStarting() ) { + // Start the master... + if ( masterSteps.size() > 0 ) { // If there is something that needs to be done on the master... + String carteObjectId = carteObjectMap.get( master ); + String masterReply = + masterServer.execService( StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + master.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); + WebResult webResult = WebResult.fromXMLString( masterReply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "An error occurred while starting the execution of the master transformation: " + + webResult.getMessage() ); + } + } + + // Start the slaves + // WG: Should these be threaded like the above initialization? + for ( int i = 0; i < slaves.length; i++ ) { + TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] ); + String carteObjectId = carteObjectMap.get( slaveTrans ); + String slaveReply = + slaves[i].execService( StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + slaveTrans.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); + WebResult webResult = WebResult.fromXMLString( slaveReply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "An error occurred while starting the execution of a slave transformation: " + + webResult.getMessage() ); + } + } + } + } + } catch ( KettleException ke ) { + throw ke; + } catch ( Exception e ) { + throw new KettleException( "There was an error during transformation split", e ); + } + } + + /** + * Monitors a clustered transformation every second, after all the transformations in a cluster schema are running. + *
+ * Now we should verify that they are all running as they should.
+ * If a transformation has an error, we should kill them all.
+ * This should happen in a separate thread to prevent blocking of the UI.
+ *
+ * When the master and slave transformations have all finished, we should also run
+ * a cleanup on those transformations to release sockets, etc.
+ *
+ * + * @param log + * the log interface channel + * @param transSplitter + * the transformation splitter object + * @param parentJob + * the parent job when executed in a job, otherwise just set to null + * @return the number of errors encountered + */ + public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter, + Job parentJob ) { + return monitorClusteredTransformation( log, transSplitter, parentJob, 1 ); // monitor every 1 seconds + } + + /** + * Monitors a clustered transformation every second, after all the transformations in a cluster schema are running. + *
+ * Now we should verify that they are all running as they should.
+ * If a transformation has an error, we should kill them all.
+ * This should happen in a separate thread to prevent blocking of the UI.
+ *
+ * When the master and slave transformations have all finished, we should also run
+ * a cleanup on those transformations to release sockets, etc.
+ *
+ * + * @param log + * the subject to use for logging + * @param transSplitter + * the transformation splitter object + * @param parentJob + * the parent job when executed in a job, otherwise just set to null + * @param sleepTimeSeconds + * the sleep time in seconds in between slave transformation status polling + * @return the number of errors encountered + */ + public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter, + Job parentJob, int sleepTimeSeconds ) { + long errors = 0L; + + // + // See if the remote transformations have finished. + // We could just look at the master, but I doubt that that is enough in all + // situations. + // + SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask + // these guys + TransMeta[] slaves = transSplitter.getSlaves(); + Map carteObjectMap = transSplitter.getCarteObjectMap(); + + SlaveServer masterServer; + try { + masterServer = transSplitter.getMasterServer(); + } catch ( KettleException e ) { + log.logError( "Error getting the master server", e ); + masterServer = null; + errors++; + } + TransMeta masterTransMeta = transSplitter.getMaster(); + + boolean allFinished = false; + while ( !allFinished && errors == 0 && ( parentJob == null || !parentJob.isStopped() ) ) { + allFinished = true; + errors = 0L; + + // Slaves first... + // + for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) { + try { + String carteObjectId = carteObjectMap.get( slaves[s] ); + SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), carteObjectId, 0 ); + if ( transStatus.isRunning() ) { + if ( log.isDetailed() ) { + log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' is still running." ); + } + allFinished = false; + } else { + if ( log.isDetailed() ) { + log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' has finished." ); + } + } + errors += transStatus.getNrStepErrors(); + } catch ( Exception e ) { + errors += 1; + log.logError( "Unable to contact slave server '" + slaveServers[s].getName() + + "' to check slave transformation : " + e.toString() ); + } + } + + // Check the master too + if ( allFinished && errors == 0 && masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) { + try { + String carteObjectId = carteObjectMap.get( masterTransMeta ); + SlaveServerTransStatus transStatus = + masterServer.getTransStatus( masterTransMeta.getName(), carteObjectId, 0 ); + if ( transStatus.isRunning() ) { + if ( log.isDetailed() ) { + log.logDetailed( "Master transformation is still running." ); + } + allFinished = false; + } else { + if ( log.isDetailed() ) { + log.logDetailed( "Master transformation has finished." 
); + } + } + Result result = transStatus.getResult( transSplitter.getOriginalTransformation() ); + errors += result.getNrErrors(); + } catch ( Exception e ) { + errors += 1; + log.logError( "Unable to contact master server '" + masterServer.getName() + + "' to check master transformation : " + e.toString() ); + } + } + + if ( ( parentJob != null && parentJob.isStopped() ) || errors != 0 ) { + // + // Stop all slaves and the master on the slave servers + // + for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) { + try { + String carteObjectId = carteObjectMap.get( slaves[s] ); + WebResult webResult = slaveServers[s].stopTransformation( slaves[s].getName(), carteObjectId ); + if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { + log.logError( "Unable to stop slave transformation '" + slaves[s].getName() + "' : " + webResult + .getMessage() ); + } + } catch ( Exception e ) { + errors += 1; + log.logError( "Unable to contact slave server '" + slaveServers[s].getName() + "' to stop transformation : " + + e.toString() ); + } + } + + try { + String carteObjectId = carteObjectMap.get( masterTransMeta ); + WebResult webResult = masterServer.stopTransformation( masterTransMeta.getName(), carteObjectId ); + if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { + log.logError( "Unable to stop master transformation '" + masterServer.getName() + "' : " + webResult + .getMessage() ); + } + } catch ( Exception e ) { + errors += 1; + log.logError( "Unable to contact master server '" + masterServer.getName() + "' to stop the master : " + e + .toString() ); + } + } + + // + // Keep waiting until all transformations have finished + // If needed, we stop them again and again until they yield. + // + if ( !allFinished ) { + // Not finished or error: wait a bit longer + if ( log.isDetailed() ) { + log.logDetailed( "Clustered transformation is still running, waiting a few seconds..." ); + } + try { + Thread.sleep( sleepTimeSeconds * 2000 ); + } catch ( Exception e ) { + // Ignore errors + } // Check all slaves every x seconds. + } + } + + log.logBasic( "All transformations in the cluster have finished." ); + + errors += cleanupCluster( log, transSplitter ); + + return errors; + } + + /** + * Cleanup the cluster, including the master and all slaves, and return the number of errors that occurred. + * + * @param log + * the log channel interface + * @param transSplitter + * the TransSplitter object + * @return the number of errors that occurred in the clustered transformation + */ + public static int cleanupCluster( LogChannelInterface log, TransSplitter transSplitter ) { + + SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); + TransMeta[] slaves = transSplitter.getSlaves(); + SlaveServer masterServer; + try { + masterServer = transSplitter.getMasterServer(); + } catch ( KettleException e ) { + log.logError( "Unable to obtain the master server from the cluster", e ); + return 1; + } + TransMeta masterTransMeta = transSplitter.getMaster(); + int errors = 0; + + // All transformations have finished, with or without error. + // Now run a cleanup on all the transformation on the master and the slaves. + // + // Slaves first... 
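+ // Every slave server gets its own cleanupTransformation call; a failure only increments the
+ // error count, so the remaining servers are still cleaned up.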
+ // + for ( int s = 0; s < slaveServers.length; s++ ) { + try { + cleanupSlaveServer( transSplitter, slaveServers[s], slaves[s] ); + } catch ( Exception e ) { + errors++; + log.logError( "Unable to contact slave server '" + slaveServers[s].getName() + + "' to clean up slave transformation", e ); + } + } + + // Clean up the master too + // + if ( masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) { + try { + cleanupSlaveServer( transSplitter, masterServer, masterTransMeta ); + } catch ( Exception e ) { + errors++; + log.logError( "Unable to contact master server '" + masterServer.getName() + + "' to clean up master transformation", e ); + } + + // Also de-allocate all ports used for this clustered transformation on the master. + // + try { + // Deallocate all ports belonging to this clustered run, not anything else + // + masterServer.deAllocateServerSockets( transSplitter.getOriginalTransformation().getName(), transSplitter + .getClusteredRunId() ); + } catch ( Exception e ) { + errors++; + log.logError( "Unable to contact master server '" + masterServer.getName() + + "' to clean up port sockets for transformation'" + transSplitter.getOriginalTransformation().getName() + + "'", e ); + } + } + + return errors; + } + + /** + * Cleanup the slave server as part of a clustered transformation. + * + * @param transSplitter + * the TransSplitter object + * @param slaveServer + * the slave server + * @param slaveTransMeta + * the slave transformation meta-data + * @throws KettleException + * if any errors occur during cleanup + */ + public static void cleanupSlaveServer( TransSplitter transSplitter, SlaveServer slaveServer, + TransMeta slaveTransMeta ) throws KettleException { + String transName = slaveTransMeta.getName(); + try { + String carteObjectId = transSplitter.getCarteObjectMap().get( slaveTransMeta ); + WebResult webResult = slaveServer.cleanupTransformation( transName, carteObjectId ); + if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { + throw new KettleException( "Unable to run clean-up on slave server '" + slaveServer + "' for transformation '" + + transName + "' : " + webResult.getMessage() ); + } + } catch ( Exception e ) { + throw new KettleException( "Unexpected error contacting slave server '" + slaveServer + + "' to clear up transformation '" + transName + "'", e ); + } + } + + /** + * Gets the clustered transformation result. + * + * @param log + * the log channel interface + * @param transSplitter + * the TransSplitter object + * @param parentJob + * the parent job + * @return the clustered transformation result + */ + public static final Result getClusteredTransformationResult( LogChannelInterface log, TransSplitter transSplitter, + Job parentJob ) { + return getClusteredTransformationResult( log, transSplitter, parentJob, false ); + } + + /** + * Gets the clustered transformation result. + * + * @param log + * the log channel interface + * @param transSplitter + * the TransSplitter object + * @param parentJob + * the parent job + * @param loggingRemoteWork + * log remote execution logs locally + * @return the clustered transformation result + */ + public static final Result getClusteredTransformationResult( LogChannelInterface log, TransSplitter transSplitter, + Job parentJob, boolean loggingRemoteWork ) { + Result result = new Result(); + // + // See if the remote transformations have finished. + // We could just look at the master, but I doubt that that is enough in all situations. 
+ // + SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask these guys + TransMeta[] slaves = transSplitter.getSlaves(); + + SlaveServer masterServer; + try { + masterServer = transSplitter.getMasterServer(); + } catch ( KettleException e ) { + log.logError( "Error getting the master server", e ); + masterServer = null; + result.setNrErrors( result.getNrErrors() + 1 ); + } + TransMeta master = transSplitter.getMaster(); + + // Slaves first... + // + for ( int s = 0; s < slaveServers.length; s++ ) { + try { + // Get the detailed status of the slave transformation... + // + SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), "", 0 ); + Result transResult = transStatus.getResult( slaves[s] ); + + result.add( transResult ); + + if ( loggingRemoteWork ) { + log.logBasic( "-- Slave : " + slaveServers[s].getName() ); + log.logBasic( transStatus.getLoggingString() ); + } + } catch ( Exception e ) { + result.setNrErrors( result.getNrErrors() + 1 ); + log.logError( "Unable to contact slave server '" + slaveServers[s].getName() + + "' to get result of slave transformation : " + e.toString() ); + } + } + + // Clean up the master too + // + if ( master != null && master.nrSteps() > 0 ) { + try { + // Get the detailed status of the slave transformation... + // + SlaveServerTransStatus transStatus = masterServer.getTransStatus( master.getName(), "", 0 ); + Result transResult = transStatus.getResult( master ); + + result.add( transResult ); + + if ( loggingRemoteWork ) { + log.logBasic( "-- Master : " + masterServer.getName() ); + log.logBasic( transStatus.getLoggingString() ); + } + } catch ( Exception e ) { + result.setNrErrors( result.getNrErrors() + 1 ); + log.logError( "Unable to contact master server '" + masterServer.getName() + + "' to get result of master transformation : " + e.toString() ); + } + } + + return result; + } + + /** + * Send the transformation for execution to a Carte slave server. + * + * @param transMeta + * the transformation meta-data + * @param executionConfiguration + * the transformation execution configuration + * @param repository + * the repository + * @return The Carte object ID on the server. + * @throws KettleException + * if any errors occur during the dispatch to the slave server + */ + public static String sendToSlaveServer( TransMeta transMeta, TransExecutionConfiguration executionConfiguration, + Repository repository, IMetaStore metaStore ) throws KettleException { + String carteObjectId; + SlaveServer slaveServer = executionConfiguration.getRemoteServer(); + + if ( slaveServer == null ) { + throw new KettleException( "No slave server specified" ); + } + if ( Const.isEmpty( transMeta.getName() ) ) { + throw new KettleException( "The transformation needs a name to uniquely identify it by on the remote server." ); + } + + try { + // Inject certain internal variables to make it more intuitive. + // + Map vars = new HashMap(); + + for ( String var : Const.INTERNAL_TRANS_VARIABLES ) { + vars.put( var, transMeta.getVariable( var ) ); + } + for ( String var : Const.INTERNAL_JOB_VARIABLES ) { + vars.put( var, transMeta.getVariable( var ) ); + } + + executionConfiguration.getVariables().putAll( vars ); + slaveServer.injectVariables( executionConfiguration.getVariables() ); + + slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() ); + + if ( executionConfiguration.isPassingExport() ) { + + // First export the job... 
+ // + FileObject tempFile = + KettleVFS.createTempFile( "transExport", ".zip", System.getProperty( "java.io.tmpdir" ), transMeta ); + + TopLevelResource topLevelResource = + ResourceUtil.serializeResourceExportInterface( tempFile.getName().toString(), transMeta, transMeta, + repository, metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME ); + + // Send the zip file over to the slave server... + // + String result = + slaveServer.sendExport( topLevelResource.getArchiveName(), AddExportServlet.TYPE_TRANS, topLevelResource + .getBaseResourceName() ); + WebResult webResult = WebResult.fromXMLString( result ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error passing the exported transformation to the remote server: " + + Const.CR + webResult.getMessage() ); + } + carteObjectId = webResult.getId(); + } else { + + // Now send it off to the remote server... + // + String xml = new TransConfiguration( transMeta, executionConfiguration ).getXML(); + String reply = slaveServer.sendXML( xml, RegisterTransServlet.CONTEXT_PATH + "/?xml=Y" ); + WebResult webResult = WebResult.fromXMLString( reply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error posting the transformation on the remote server: " + Const.CR + + webResult.getMessage() ); + } + carteObjectId = webResult.getId(); + } + + // Prepare the transformation + // + String reply = + slaveServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( transMeta + .getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId ); + WebResult webResult = WebResult.fromXMLString( reply ); + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error preparing the transformation for excution on the remote server: " + + Const.CR + webResult.getMessage() ); + } + + // Start the transformation + // + reply = + slaveServer.execService( StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( transMeta + .getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId ); + webResult = WebResult.fromXMLString( reply ); + + if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { + throw new KettleException( "There was an error starting the transformation on the remote server: " + Const.CR + + webResult.getMessage() ); + } + + return carteObjectId; + } catch ( KettleException ke ) { + throw ke; + } catch ( Exception e ) { + throw new KettleException( e ); + } + } + + /** + * Checks whether the transformation is ready to start (i.e. execution preparation was successful) + * + * @return true if the transformation was prepared for execution successfully, false otherwise + * @see org.pentaho.di.trans.Trans#prepareExecution(String[]) + */ + public boolean isReadyToStart() { + return readyToStart; + } + + /** + * Sets the internal kettle variables. + * + * @param var + * the new internal kettle variables + */ + public void setInternalKettleVariables( VariableSpace var ) { + if ( transMeta != null && !Const.isEmpty( transMeta.getFilename() ) ) { // we have a finename that's defined. 
+ try { + FileObject fileObject = KettleVFS.getFileObject( transMeta.getFilename(), var ); + FileName fileName = fileObject.getName(); + + // The filename of the transformation + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() ); + + // The directory of the transformation + FileName fileDir = fileName.getParent(); + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() ); + } catch ( KettleFileException e ) { + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); + } + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); + } + + boolean hasRepoDir = transMeta.getRepositoryDirectory() != null && transMeta.getRepository() != null; + + // The name of the transformation + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( transMeta.getName(), "" ) ); + + // setup fallbacks + if ( hasRepoDir ) { + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) ); + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); + } + + // TODO PUT THIS INSIDE OF THE "IF" + // The name of the directory in the repository + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, transMeta + .getRepositoryDirectory() != null ? transMeta.getRepositoryDirectory().getPath() : "" ); + + // Here we don't clear the definition of the job specific parameters, as they may come in handy. + // A transformation can be called from a job and may inherit the job internal variables + // but the other around is not possible. + + if ( hasRepoDir ) { + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) ); + if ( "/".equals( variables.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ) ) { + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "" ); + } + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); + } + } + + /** + * Copies variables from a given variable space to this transformation. + * + * @param space + * the variable space + * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) + */ + public void copyVariablesFrom( VariableSpace space ) { + variables.copyVariablesFrom( space ); + } + + /** + * Substitutes any variable values into the given string, and returns the resolved string. + * + * @param aString + * the string to resolve against environment variables + * @return the string after variables have been resolved/susbstituted + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) + */ + public String environmentSubstitute( String aString ) { + return variables.environmentSubstitute( aString ); + } + + /** + * Substitutes any variable values into each of the given strings, and returns an array containing the resolved + * string(s). 
+ * + * @param aString + * an array of strings to resolve against environment variables + * @return the array of strings after variables have been resolved/susbstituted + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) + */ + public String[] environmentSubstitute( String[] aString ) { + return variables.environmentSubstitute( aString ); + } + + public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) + throws KettleValueException { + return variables.fieldSubstitute( aString, rowMeta, rowData ); + } + + /** + * Gets the parent variable space. + * + * @return the parent variable space + * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() + */ + public VariableSpace getParentVariableSpace() { + return variables.getParentVariableSpace(); + } + + /** + * Sets the parent variable space. + * + * @param parent + * the new parent variable space + * @see org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace( + * org.pentaho.di.core.variables.VariableSpace) + */ + public void setParentVariableSpace( VariableSpace parent ) { + variables.setParentVariableSpace( parent ); + } + + /** + * Gets the value of the specified variable, or returns a default value if no such variable exists. + * + * @param variableName + * the variable name + * @param defaultValue + * the default value + * @return the value of the specified variable, or returns a default value if no such variable exists + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) + */ + public String getVariable( String variableName, String defaultValue ) { + return variables.getVariable( variableName, defaultValue ); + } + + /** + * Gets the value of the specified variable, or returns a default value if no such variable exists. + * + * @param variableName + * the variable name + * @return the value of the specified variable, or returns a default value if no such variable exists + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) + */ + public String getVariable( String variableName ) { + return variables.getVariable( variableName ); + } + + /** + * Returns a boolean representation of the specified variable after performing any necessary substitution. Truth + * values include case-insensitive versions of "Y", "YES", "TRUE" or "1". + * + * @param variableName + * the variable name + * @param defaultValue + * the default value + * @return a boolean representation of the specified variable after performing any necessary substitution + * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) + */ + public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) { + if ( !Const.isEmpty( variableName ) ) { + String value = environmentSubstitute( variableName ); + if ( !Const.isEmpty( value ) ) { + return ValueMeta.convertStringToBoolean( value ); + } + } + return defaultValue; + } + + /** + * Sets the values of the transformation's variables to the values from the parent variables. + * + * @param parent + * the parent + * @see org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom( + * org.pentaho.di.core.variables.VariableSpace) + */ + public void initializeVariablesFrom( VariableSpace parent ) { + variables.initializeVariablesFrom( parent ); + } + + /** + * Gets a list of variable names for the transformation. 
+ * + * @return a list of variable names + * @see org.pentaho.di.core.variables.VariableSpace#listVariables() + */ + public String[] listVariables() { + return variables.listVariables(); + } + + /** + * Sets the value of the specified variable to the specified value. + * + * @param variableName + * the variable name + * @param variableValue + * the variable value + * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) + */ + public void setVariable( String variableName, String variableValue ) { + variables.setVariable( variableName, variableValue ); + } + + /** + * Shares a variable space from another variable space. This means that the object should take over the space used as + * argument. + * + * @param space + * the variable space + * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) + */ + public void shareVariablesWith( VariableSpace space ) { + variables = space; + } + + /** + * Injects variables using the given Map. The behavior should be that the properties object will be stored and at the + * time the VariableSpace is initialized (or upon calling this method if the space is already initialized). After + * injecting the link of the properties object should be removed. + * + * @param prop + * the property map + * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) + */ + public void injectVariables( Map prop ) { + variables.injectVariables( prop ); + } + + /** + * Pauses the transformation (pause all steps). + */ + public void pauseRunning() { + paused.set( true ); + for ( StepMetaDataCombi combi : steps ) { + combi.step.pauseRunning(); + } + } + + /** + * Resumes running the transformation after a pause (resume all steps). + */ + public void resumeRunning() { + for ( StepMetaDataCombi combi : steps ) { + combi.step.resumeRunning(); + } + paused.set( false ); + } + + /** + * Checks whether the transformation is being previewed. + * + * @return true if the transformation is being previewed, false otherwise + */ + public boolean isPreview() { + return preview; + } + + /** + * Sets whether the transformation is being previewed. + * + * @param preview + * true if the transformation is being previewed, false otherwise + */ + public void setPreview( boolean preview ) { + this.preview = preview; + } + + /** + * Gets the repository object for the transformation. + * + * @return the repository + */ + public Repository getRepository() { + + if ( repository == null ) { + // Does the transmeta have a repo? + // This is a valid case, when a non-repo trans is attempting to retrieve + // a transformation in the repository. + if ( transMeta != null ) { + return transMeta.getRepository(); + } + } + return repository; + } + + /** + * Sets the repository object for the transformation. + * + * @param repository + * the repository object to set + */ + public void setRepository( Repository repository ) { + this.repository = repository; + if ( transMeta != null ) { + transMeta.setRepository( repository ); + } + } + + /** + * Gets a named list (map) of step performance snapshots. + * + * @return a named list (map) of step performance snapshots + */ + public Map> getStepPerformanceSnapShots() { + return stepPerformanceSnapShots; + } + + /** + * Sets the named list (map) of step performance snapshots. 
+ * + * @param stepPerformanceSnapShots + * a named list (map) of step performance snapshots to set + */ + public void setStepPerformanceSnapShots( Map> stepPerformanceSnapShots ) { + this.stepPerformanceSnapShots = stepPerformanceSnapShots; + } + + /** + * Gets a list of the transformation listeners. Please do not attempt to modify this list externally. Returned list is + * mutable only for backward compatibility purposes. + * + * @return the transListeners + */ + public List getTransListeners() { + return transListeners; + } + + /** + * Sets the list of transformation listeners. + * + * @param transListeners + * the transListeners to set + */ + public void setTransListeners( List transListeners ) { + this.transListeners = Collections.synchronizedList( transListeners ); + } + + /** + * Adds a transformation listener. + * + * @param transListener + * the trans listener + */ + public void addTransListener( TransListener transListener ) { + // PDI-5229 sync added + synchronized ( transListeners ) { + transListeners.add( transListener ); + } + } + + /** + * Sets the list of stop-event listeners for the transformation. + * + * @param transStoppedListeners + * the list of stop-event listeners to set + */ + public void setTransStoppedListeners( List transStoppedListeners ) { + this.transStoppedListeners = Collections.synchronizedList( transStoppedListeners ); + } + + /** + * Gets the list of stop-event listeners for the transformation. This is not concurrent safe. Please note this is + * mutable implementation only for backward compatibility reasons. + * + * @return the list of stop-event listeners + */ + public List getTransStoppedListeners() { + return transStoppedListeners; + } + + /** + * Adds a stop-event listener to the transformation. + * + * @param transStoppedListener + * the stop-event listener to add + */ + public void addTransStoppedListener( TransStoppedListener transStoppedListener ) { + transStoppedListeners.add( transStoppedListener ); + } + + /** + * Checks if the transformation is paused. + * + * @return true if the transformation is paused, false otherwise + */ + public boolean isPaused() { + return paused.get(); + } + + /** + * Checks if the transformation is stopped. + * + * @return true if the transformation is stopped, false otherwise + */ + public boolean isStopped() { + return stopped.get(); + } + + /** + * Monitors a remote transformation every 5 seconds. + * + * @param log + * the log channel interface + * @param carteObjectId + * the Carte object ID + * @param transName + * the transformation name + * @param remoteSlaveServer + * the remote slave server + */ + public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName, + SlaveServer remoteSlaveServer ) { + monitorRemoteTransformation( log, carteObjectId, transName, remoteSlaveServer, 5 ); + } + + /** + * Monitors a remote transformation at the specified interval. 
+ * + * @param log + * the log channel interface + * @param carteObjectId + * the Carte object ID + * @param transName + * the transformation name + * @param remoteSlaveServer + * the remote slave server + * @param sleepTimeSeconds + * the sleep time (in seconds) + */ + public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName, + SlaveServer remoteSlaveServer, int sleepTimeSeconds ) { + long errors = 0; + boolean allFinished = false; + while ( !allFinished && errors == 0 ) { + allFinished = true; + errors = 0L; + + // Check the remote server + if ( allFinished && errors == 0 ) { + try { + SlaveServerTransStatus transStatus = remoteSlaveServer.getTransStatus( transName, carteObjectId, 0 ); + if ( transStatus.isRunning() ) { + if ( log.isDetailed() ) { + log.logDetailed( transName, "Remote transformation is still running." ); + } + allFinished = false; + } else { + if ( log.isDetailed() ) { + log.logDetailed( transName, "Remote transformation has finished." ); + } + } + Result result = transStatus.getResult(); + errors += result.getNrErrors(); + } catch ( Exception e ) { + errors += 1; + log.logError( transName, "Unable to contact remote slave server '" + remoteSlaveServer.getName() + + "' to check transformation status : " + e.toString() ); + } + } + + // + // Keep waiting until all transformations have finished + // If needed, we stop them again and again until they yield. + // + if ( !allFinished ) { + // Not finished or error: wait a bit longer + if ( log.isDetailed() ) { + log.logDetailed( transName, "The remote transformation is still running, waiting a few seconds..." ); + } + try { + Thread.sleep( sleepTimeSeconds * 1000 ); + } catch ( Exception e ) { + // Ignore errors + } // Check all slaves every x seconds. + } + } + + log.logMinimal( transName, "The remote transformation has finished." ); + + // Clean up the remote transformation + // + try { + WebResult webResult = remoteSlaveServer.cleanupTransformation( transName, carteObjectId ); + if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { + log.logError( transName, "Unable to run clean-up on remote transformation '" + transName + "' : " + webResult + .getMessage() ); + errors += 1; + } + } catch ( Exception e ) { + errors += 1; + log.logError( transName, "Unable to contact slave server '" + remoteSlaveServer.getName() + + "' to clean up transformation : " + e.toString() ); + } + } + + /** + * Adds a parameter definition to this transformation. + * + * @param key + * the name of the parameter + * @param defValue + * the default value for the parameter + * @param description + * the description of the parameter + * @throws DuplicateParamException + * the duplicate param exception + * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, + * java.lang.String) + */ + public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException { + namedParams.addParameterDefinition( key, defValue, description ); + } + + /** + * Gets the default value of the specified parameter. 
+ * + * @param key + * the name of the parameter + * @return the default value of the parameter + * @throws UnknownParamException + * if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) + */ + public String getParameterDefault( String key ) throws UnknownParamException { + return namedParams.getParameterDefault( key ); + } + + /** + * Gets the description of the specified parameter. + * + * @param key + * the name of the parameter + * @return the parameter description + * @throws UnknownParamException + * if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) + */ + public String getParameterDescription( String key ) throws UnknownParamException { + return namedParams.getParameterDescription( key ); + } + + /** + * Gets the value of the specified parameter. + * + * @param key + * the name of the parameter + * @return the parameter value + * @throws UnknownParamException + * if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) + */ + public String getParameterValue( String key ) throws UnknownParamException { + return namedParams.getParameterValue( key ); + } + + /** + * Gets a list of the parameters for the transformation. + * + * @return an array of strings containing the names of all parameters for the transformation + * @see org.pentaho.di.core.parameters.NamedParams#listParameters() + */ + public String[] listParameters() { + return namedParams.listParameters(); + } + + /** + * Sets the value for the specified parameter. + * + * @param key + * the name of the parameter + * @param value + * the name of the value + * @throws UnknownParamException + * if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) + */ + public void setParameterValue( String key, String value ) throws UnknownParamException { + namedParams.setParameterValue( key, value ); + } + + /** + * Remove all parameters. + * + * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() + */ + public void eraseParameters() { + namedParams.eraseParameters(); + } + + /** + * Clear the values of all parameters. + * + * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() + */ + public void clearParameters() { + namedParams.clearParameters(); + } + + /** + * Activates all parameters by setting their values. If no values already exist, the method will attempt to set the + * parameter to the default value. If no default value exists, the method will set the value of the parameter to the + * empty string (""). + * + * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() + */ + public void activateParameters() { + String[] keys = listParameters(); + + for ( String key : keys ) { + String value; + try { + value = getParameterValue( key ); + } catch ( UnknownParamException e ) { + value = ""; + } + + String defValue; + try { + defValue = getParameterDefault( key ); + } catch ( UnknownParamException e ) { + defValue = ""; + } + + if ( Const.isEmpty( value ) ) { + setVariable( key, Const.NVL( defValue, "" ) ); + } else { + setVariable( key, Const.NVL( value, "" ) ); + } + } + } + + /** + * Copy parameters from a NamedParams object. 
+ * + * @param params + * the NamedParams object from which to copy the parameters + * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) + */ + public void copyParametersFrom( NamedParams params ) { + namedParams.copyParametersFrom( params ); + } + + /** + * Gets the parent transformation, which is null if no parent transformation exists. + * + * @return a reference to the parent transformation's Trans object, or null if no parent transformation exists + */ + public Trans getParentTrans() { + return parentTrans; + } + + /** + * Sets the parent transformation. + * + * @param parentTrans + * the parentTrans to set + */ + public void setParentTrans( Trans parentTrans ) { + this.logLevel = parentTrans.getLogLevel(); + this.log.setLogLevel( logLevel ); + this.parentTrans = parentTrans; + + transactionId = calculateTransactionId(); + } + + /** + * Gets the mapping step name. + * + * @return the name of the mapping step that created this transformation + */ + public String getMappingStepName() { + return mappingStepName; + } + + /** + * Sets the mapping step name. + * + * @param mappingStepName + * the name of the mapping step that created this transformation + */ + public void setMappingStepName( String mappingStepName ) { + this.mappingStepName = mappingStepName; + } + + /** + * Sets the socket repository. + * + * @param socketRepository + * the new socket repository + */ + public void setSocketRepository( SocketRepository socketRepository ) { + this.socketRepository = socketRepository; + } + + /** + * Gets the socket repository. + * + * @return the socket repository + */ + public SocketRepository getSocketRepository() { + return socketRepository; + } + + /** + * Gets the object name. + * + * @return the object name + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectName() + */ + public String getObjectName() { + return getName(); + } + + /** + * Gets the object copy. For Trans, this always returns null + * + * @return null + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectCopy() + */ + public String getObjectCopy() { + return null; + } + + /** + * Gets the filename of the transformation, or null if no filename exists + * + * @return the filename + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getFilename() + */ + public String getFilename() { + if ( transMeta == null ) { + return null; + } + return transMeta.getFilename(); + } + + /** + * Gets the log channel ID. + * + * @return the log channel ID + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() + */ + public String getLogChannelId() { + return log.getLogChannelId(); + } + + /** + * Gets the object ID. + * + * @return the object ID + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectId() + */ + public ObjectId getObjectId() { + if ( transMeta == null ) { + return null; + } + return transMeta.getObjectId(); + } + + /** + * Gets the object revision. + * + * @return the object revision + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectRevision() + */ + public ObjectRevision getObjectRevision() { + if ( transMeta == null ) { + return null; + } + return transMeta.getObjectRevision(); + } + + /** + * Gets the object type. 
For Trans, this always returns LoggingObjectType.TRANS + * + * @return the object type + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.TRANS; + } + + /** + * Gets the parent logging object interface. + * + * @return the parent + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getParent() + */ + public LoggingObjectInterface getParent() { + return parent; + } + + /** + * Gets the repository directory. + * + * @return the repository directory + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getRepositoryDirectory() + */ + public RepositoryDirectoryInterface getRepositoryDirectory() { + if ( transMeta == null ) { + return null; + } + return transMeta.getRepositoryDirectory(); + } + + /** + * Gets the log level. + * + * @return the log level + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogLevel() + */ + public LogLevel getLogLevel() { + return logLevel; + } + + /** + * Sets the log level. + * + * @param logLevel + * the new log level + */ + public void setLogLevel( LogLevel logLevel ) { + this.logLevel = logLevel; + log.setLogLevel( logLevel ); + } + + /** + * Gets the logging hierarchy. + * + * @return the logging hierarchy + */ + public List getLoggingHierarchy() { + List hierarchy = new ArrayList(); + List childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() ); + for ( String childId : childIds ) { + LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId ); + if ( loggingObject != null ) { + hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) ); + } + } + + return hierarchy; + } + + /** + * Gets the active sub-transformations. + * + * @return a map (by name) of the active sub-transformations + */ + public Map getActiveSubtransformations() { + return activeSubtransformations; + } + + /** + * Gets the active sub-jobs. + * + * @return a map (by name) of the active sub-jobs + */ + public Map getActiveSubjobs() { + return activeSubjobs; + } + + /** + * Gets the container object ID. + * + * @return the Carte object ID + */ + public String getContainerObjectId() { + return containerObjectId; + } + + /** + * Sets the container object ID. + * + * @param containerObjectId + * the Carte object ID to set + */ + public void setContainerObjectId( String containerObjectId ) { + this.containerObjectId = containerObjectId; + } + + /** + * Gets the registration date. For Trans, this always returns null + * + * @return null + */ + public Date getRegistrationDate() { + return null; + } + + /** + * Sets the servlet print writer. + * + * @param servletPrintWriter + * the new servlet print writer + */ + public void setServletPrintWriter( PrintWriter servletPrintWriter ) { + this.servletPrintWriter = servletPrintWriter; + } + + /** + * Gets the servlet print writer. + * + * @return the servlet print writer + */ + public PrintWriter getServletPrintWriter() { + return servletPrintWriter; + } + + /** + * Gets the name of the executing server. + * + * @return the executingServer + */ + public String getExecutingServer() { + return executingServer; + } + + /** + * Sets the name of the executing server. + * + * @param executingServer + * the executingServer to set + */ + public void setExecutingServer( String executingServer ) { + this.executingServer = executingServer; + } + + /** + * Gets the name of the executing user. 
+ * + * @return the executingUser + */ + public String getExecutingUser() { + return executingUser; + } + + /** + * Sets the name of the executing user. + * + * @param executingUser + * the executingUser to set + */ + public void setExecutingUser( String executingUser ) { + this.executingUser = executingUser; + } + + @Override + public boolean isGatheringMetrics() { + return log != null && log.isGatheringMetrics(); + } + + @Override + public void setGatheringMetrics( boolean gatheringMetrics ) { + if ( log != null ) { + log.setGatheringMetrics( gatheringMetrics ); + } + } + + @Override + public boolean isForcingSeparateLogging() { + return log != null && log.isForcingSeparateLogging(); + } + + @Override + public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { + if ( log != null ) { + log.setForcingSeparateLogging( forcingSeparateLogging ); + } + } + + public List getResultFiles() { + return resultFiles; + } + + public void setResultFiles( List resultFiles ) { + this.resultFiles = resultFiles; + } + + public List getResultRows() { + return resultRows; + } + + public void setResultRows( List resultRows ) { + this.resultRows = resultRows; + } + + public Result getPreviousResult() { + return previousResult; + } + + public void setPreviousResult( Result previousResult ) { + this.previousResult = previousResult; + } + + public Hashtable getCounters() { + return counters; + } + + public void setCounters( Hashtable counters ) { + this.counters = counters; + } + + public String[] getArguments() { + return arguments; + } + + public void setArguments( String[] arguments ) { + this.arguments = arguments; + } + + /** + * Clear the error in the transformation, clear all the rows from all the row sets, to make sure the transformation + * can continue with other data. This is intended for use when running single threaded. + */ + public void clearError() { + stopped.set( false ); + errors.set( 0 ); + setFinished( false ); + for ( StepMetaDataCombi combi : steps ) { + StepInterface step = combi.step; + for ( RowSet rowSet : step.getInputRowSets() ) { + rowSet.clear(); + } + step.setStopped( false ); + } + } + + /** + * Gets the transaction ID for the transformation. + * + * @return the transactionId + */ + public String getTransactionId() { + return transactionId; + } + + /** + * Sets the transaction ID for the transformation. + * + * @param transactionId + * the transactionId to set + */ + public void setTransactionId( String transactionId ) { + this.transactionId = transactionId; + } + + /** + * Calculates the transaction ID for the transformation. + * + * @return the calculated transaction ID for the transformation. 
+ */ + public String calculateTransactionId() { + if ( getTransMeta() != null && getTransMeta().isUsingUniqueConnections() ) { + if ( parentJob != null && parentJob.getTransactionId() != null ) { + return parentJob.getTransactionId(); + } else if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() ) { + return parentTrans.getTransactionId(); + } else { + return DatabaseConnectionMap.getInstance().getNextTransactionId(); + } + } else { + return Thread.currentThread().getName(); + } + } + + public IMetaStore getMetaStore() { + return metaStore; + } + + public void setMetaStore( IMetaStore metaStore ) { + this.metaStore = metaStore; + if ( transMeta != null ) { + transMeta.setMetaStore( metaStore ); + } + } + + /** + * Sets encoding of HttpServletResponse according to System encoding.Check if system encoding is null or an empty and + * set it to HttpServletResponse when not and writes error to log if null. Throw IllegalArgumentException if input + * parameter is null. + * + * @param response + * the HttpServletResponse to set encoding, mayn't be null + */ + public void setServletReponse( HttpServletResponse response ) { + if ( response == null ) { + throw new IllegalArgumentException( "Response is not valid: " + response ); + } + String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null ); + // true if encoding is null or an empty (also for the next kin of strings: " ") + if ( !StringUtils.isBlank( encoding ) ) { + try { + response.setCharacterEncoding( encoding.trim() ); + response.setContentType( "text/html; charset=" + encoding ); + } catch ( Exception ex ) { + LogChannel.GENERAL.logError( "Unable to encode data with encoding : '" + encoding + "'", ex ); + } + } + this.servletResponse = response; + } + + public HttpServletResponse getServletResponse() { + return servletResponse; + } + + public void setServletRequest( HttpServletRequest request ) { + this.servletRequest = request; + } + + public HttpServletRequest getServletRequest() { + return servletRequest; + } + + public List getDelegationListeners() { + return delegationListeners; + } + + public void setDelegationListeners( List delegationListeners ) { + this.delegationListeners = delegationListeners; + } + + public void addDelegationListener( DelegationListener delegationListener ) { + delegationListeners.add( delegationListener ); + } + + public synchronized void doTopologySortOfSteps() { + // The bubble sort algorithm in contrast to the QuickSort or MergeSort + // algorithms + // does indeed cover all possibilities. + // Sorting larger transformations with hundreds of steps might be too slow + // though. + // We should consider caching TransMeta.findPrevious() results in that case. + // + transMeta.clearCaches(); + + // + // Cocktail sort (bi-directional bubble sort) + // + // Original sort was taking 3ms for 30 steps + // cocktail sort takes about 8ms for the same 30, but it works :) + // + int stepsMinSize = 0; + int stepsSize = steps.size(); + + // Noticed a problem with an immediate shrinking iteration window + // trapping rows that need to be sorted. + // This threshold buys us some time to get the sorting close before + // starting to decrease the window size. + // + // TODO: this could become much smarter by tracking row movement + // and reacting to that each outer iteration verses + // using a threshold. + // + // After this many iterations enable trimming inner iteration + // window on no change being detected. 
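+ // Example: with 30 steps the shrink threshold is Math.round( 30 * 0.75 ) = 23 and at most
+ // 30 * 2 = 60 outer iterations are attempted before the sort gives up.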
+ // + int windowShrinkThreshold = (int) Math.round( stepsSize * 0.75 ); + + // give ourselves some room to sort big lists. the window threshold should + // stop us before reaching this anyway. + // + int totalIterations = stepsSize * 2; + + boolean isBefore = false; + boolean forwardChange = false; + boolean backwardChange = false; + + boolean lastForwardChange = true; + boolean keepSortingForward = true; + + StepMetaDataCombi one = null; + StepMetaDataCombi two = null; + + for ( int x = 0; x < totalIterations; x++ ) { + + // Go forward through the list + // + if ( keepSortingForward ) { + for ( int y = stepsMinSize; y < stepsSize - 1; y++ ) { + one = steps.get( y ); + two = steps.get( y + 1 ); + + if ( one.stepMeta.equals( two.stepMeta ) ) { + isBefore = one.copy > two.copy; + } else { + isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta ); + } + if ( isBefore ) { + // two was found to be positioned BEFORE one so we need to + // switch them... + // + steps.set( y, two ); + steps.set( y + 1, one ); + forwardChange = true; + + } + } + } + + // Go backward through the list + // + for ( int z = stepsSize - 1; z > stepsMinSize; z-- ) { + one = steps.get( z ); + two = steps.get( z - 1 ); + + if ( one.stepMeta.equals( two.stepMeta ) ) { + isBefore = one.copy > two.copy; + } else { + isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta ); + } + if ( !isBefore ) { + // two was found NOT to be positioned BEFORE one so we need to + // switch them... + // + steps.set( z, two ); + steps.set( z - 1, one ); + backwardChange = true; + } + } + + // Shrink stepsSize(max) if there was no forward change + // + if ( x > windowShrinkThreshold && !forwardChange ) { + + // should we keep going? check the window size + // + stepsSize--; + if ( stepsSize <= stepsMinSize ) { + break; + } + } + + // shrink stepsMinSize(min) if there was no backward change + // + if ( x > windowShrinkThreshold && !backwardChange ) { + + // should we keep going? check the window size + // + stepsMinSize++; + if ( stepsMinSize >= stepsSize ) { + break; + } + } + + // End of both forward and backward traversal. + // Time to see if we should keep going. + // + if ( !forwardChange && !backwardChange ) { + break; + } + + // + // if we are past the first iteration and there has been no change twice, + // quit doing it! 
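+ // Note that only the forward pass is switched off here; the backward pass keeps running, and the
+ // no-change-in-either-direction check above is what ultimately breaks out of the loop.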
+ // + if ( keepSortingForward && x > 0 && !lastForwardChange && !forwardChange ) { + keepSortingForward = false; + } + lastForwardChange = forwardChange; + forwardChange = false; + backwardChange = false; + + } // finished sorting + } + + @Override + public Map getExtensionDataMap() { + return extensionDataMap; + } + + protected ExecutorService startHeartbeat( final long intervalInSeconds ) { + + ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor( new ThreadFactory() { + + @Override + public Thread newThread( Runnable r ) { + Thread thread = new Thread( r, "Transformation Heartbeat Thread for: " + getName() ); + thread.setDaemon( true ); + return thread; + } + } ); + + heartbeat.scheduleAtFixedRate( new Runnable() { + public void run() { + try { + + if ( Trans.this.isFinished() ) { + log.logBasic( "Shutting down heartbeat signal for " + getName() ); + shutdownHeartbeat( Trans.this.heartbeat ); + return; + } + + log.logDebug( "Triggering heartbeat signal for " + getName() + " at every " + intervalInSeconds + + " seconds" ); + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationHeartbeat.id, Trans.this ); + + } catch ( KettleException e ) { + log.logError( e.getMessage(), e ); + } + } + }, intervalInSeconds /* initial delay */, intervalInSeconds /* interval delay */, TimeUnit.SECONDS ); + + return heartbeat; + } + + protected void shutdownHeartbeat( ExecutorService heartbeat ) { + + if ( heartbeat != null ) { + + try { + heartbeat.shutdownNow(); // prevents waiting tasks from starting and attempts to stop currently executing ones + + } catch ( Throwable t ) { + /* do nothing */ + } + } + } + + private int getHeartbeatIntervalInSeconds() { + + TransMeta meta = this.getTransMeta(); + + // 1 - check if there's a user defined value ( transformation-specific ) heartbeat periodic interval; + // 2 - check if there's a default defined value ( transformation-specific ) heartbeat periodic interval; + // 3 - use default Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS if none of the above have been set + + try { + + if ( meta != null ) { + + return Const.toInt( meta.getParameterValue( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), Const.toInt( meta + .getParameterDefault( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), + Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS ) ); + } + + } catch ( Exception e ) { + /* do nothing, return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS */ + } + + return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java new file mode 100644 index 0000000..b2eb0eb --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java @@ -0,0 +1,6265 @@ +//CHECKSTYLE:FileLength:OFF +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2015 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.trans; + +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Hashtable; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.vfs2.FileName; +import org.apache.commons.vfs2.FileObject; +import org.apache.commons.vfs2.FileSystemException; +import org.pentaho.di.base.AbstractMeta; +import org.pentaho.di.cluster.ClusterSchema; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.Counter; +import org.pentaho.di.core.DBCache; +import org.pentaho.di.core.LastUsedFile; +import org.pentaho.di.core.NotePadMeta; +import org.pentaho.di.core.ProgressMonitorListener; +import org.pentaho.di.core.Props; +import org.pentaho.di.core.Result; +import org.pentaho.di.core.ResultFile; +import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.attributes.AttributesUtil; +import org.pentaho.di.core.database.Database; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleFileException; +import org.pentaho.di.core.exception.KettleMissingPluginsException; +import org.pentaho.di.core.exception.KettleRowException; +import org.pentaho.di.core.exception.KettleStepException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.extension.ExtensionPointHandler; +import org.pentaho.di.core.extension.KettleExtensionPoint; +import org.pentaho.di.core.gui.OverwritePrompter; +import org.pentaho.di.core.gui.Point; +import org.pentaho.di.core.logging.ChannelLogTable; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.logging.LogStatus; +import org.pentaho.di.core.logging.LogTableInterface; +import org.pentaho.di.core.logging.LoggingObjectInterface; +import org.pentaho.di.core.logging.LoggingObjectType; +import org.pentaho.di.core.logging.MetricsLogTable; +import org.pentaho.di.core.logging.PerformanceLogTable; +import org.pentaho.di.core.logging.StepLogTable; +import org.pentaho.di.core.logging.TransLogTable; +import org.pentaho.di.core.parameters.NamedParamsDefault; +import org.pentaho.di.core.reflection.StringSearchResult; +import org.pentaho.di.core.reflection.StringSearcher; +import org.pentaho.di.core.row.RowMeta; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMetaInterface; +import org.pentaho.di.core.undo.TransAction; +import org.pentaho.di.core.util.StringUtil; +import org.pentaho.di.core.variables.VariableSpace; +import 
org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.core.xml.XMLInterface; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.partition.PartitionSchema; +import org.pentaho.di.repository.HasRepositoryInterface; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.repository.RepositoryDirectory; +import org.pentaho.di.repository.RepositoryElementInterface; +import org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.resource.ResourceDefinition; +import org.pentaho.di.resource.ResourceExportInterface; +import org.pentaho.di.resource.ResourceNamingInterface; +import org.pentaho.di.resource.ResourceReference; +import org.pentaho.di.shared.SharedObjectInterface; +import org.pentaho.di.shared.SharedObjects; +import org.pentaho.di.trans.step.BaseStep; +import org.pentaho.di.trans.step.RemoteStep; +import org.pentaho.di.trans.step.StepErrorMeta; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaChangeListenerInterface; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.StepPartitioningMeta; +import org.pentaho.di.trans.steps.jobexecutor.JobExecutorMeta; +import org.pentaho.di.trans.steps.mapping.MappingMeta; +import org.pentaho.di.trans.steps.missing.MissingTrans; +import org.pentaho.di.trans.steps.singlethreader.SingleThreaderMeta; +import org.pentaho.di.trans.steps.transexecutor.TransExecutorMeta; +import org.pentaho.metastore.api.IMetaStore; +import org.pentaho.metastore.api.exceptions.MetaStoreException; +import org.w3c.dom.Document; +import org.w3c.dom.Node; + +/** + * This class defines information about a transformation and offers methods to save and load it from XML or a PDI + * database repository, as well as methods to alter a transformation by adding/removing databases, steps, hops, etc. + * + * @since 20-jun-2003 + * @author Matt Casters + */ +public class TransMeta extends AbstractMeta + implements XMLInterface, Comparator, Comparable, Cloneable, ResourceExportInterface, + RepositoryElementInterface, LoggingObjectInterface { + + /** The package name, used for internationalization of messages. */ + private static Class PKG = Trans.class; // for i18n purposes, needed by Translator2!! + + /** A constant specifying the tag value for the XML node of the transformation. */ + public static final String XML_TAG = "transformation"; + + /** + * A constant used by the logging operations to indicate any logged messages are related to transformation meta-data. + */ + public static final String STRING_TRANSMETA = "Transformation metadata"; + + /** A constant specifying the repository element type as a Transformation. */ + public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.TRANSFORMATION; + + public static final int BORDER_INDENT = 20; + /** The list of steps associated with the transformation. */ + protected List steps; + + /** The list of hops associated with the transformation. */ + protected List hops; + + /** The list of dependencies associated with the transformation. */ + protected List dependencies; + + /** The list of cluster schemas associated with the transformation. */ + protected List clusterSchemas; + + /** The list of partition schemas associated with the transformation. */ + private List partitionSchemas; + + /** The version string for the transformation. */ + protected String trans_version; + + /** The status of the transformation. 
*/ + protected int trans_status; + + /** The transformation logging table associated with the transformation. */ + protected TransLogTable transLogTable; + + /** The performance logging table associated with the transformation. */ + protected PerformanceLogTable performanceLogTable; + + /** The step logging table associated with the transformation. */ + protected StepLogTable stepLogTable; + + /** The metricslogging table associated with the transformation. */ + protected MetricsLogTable metricsLogTable; + + /** The size of the current rowset. */ + protected int sizeRowset; + + /** The meta-data for the database connection associated with "max date" auditing information. */ + protected DatabaseMeta maxDateConnection; + + /** The table name associated with "max date" auditing information. */ + protected String maxDateTable; + + /** The field associated with "max date" auditing information. */ + protected String maxDateField; + + /** The amount by which to increase the "max date" value. */ + protected double maxDateOffset; + + /** The maximum date difference used for "max date" auditing and limiting job sizes. */ + protected double maxDateDifference; + + /** + * The list of arguments to the transformation. + * + * @deprecated Moved to Trans + * */ + @Deprecated + protected String[] arguments; + + /** + * A table of named counters. + * + * @deprecated Moved to Trans + */ + @Deprecated + protected Hashtable counters; + + /** Indicators for changes in steps, databases, hops, and notes. */ + protected boolean changed_steps, changed_hops; + + /** The database cache. */ + protected DBCache dbCache; + + /** The time (in nanoseconds) to wait when the input buffer is empty. */ + protected int sleepTimeEmpty; + + /** The time (in nanoseconds) to wait when the input buffer is full. */ + protected int sleepTimeFull; + + /** The previous result. */ + protected Result previousResult; + + /** + * The result rows. + * + * @deprecated + * */ + @Deprecated + protected List resultRows; + + /** + * The result files. + * + * @deprecated + * */ + @Deprecated + protected List resultFiles; + + /** Whether the transformation is using unique connections. */ + protected boolean usingUniqueConnections; + + /** Whether the feedback is shown. */ + protected boolean feedbackShown; + + /** The feedback size. */ + protected int feedbackSize; + + /** + * Flag to indicate thread management usage. Set to default to false from version 2.5.0 on. Before that it was enabled + * by default. + */ + protected boolean usingThreadPriorityManagment; + + /** The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. */ + protected SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution; + + /** Just a flag indicating that this is a slave transformation - internal use only, no GUI option. */ + protected boolean slaveTransformation; + + /** Whether the transformation is capturing step performance snap shots. */ + protected boolean capturingStepPerformanceSnapShots; + + /** The step performance capturing delay. */ + protected long stepPerformanceCapturingDelay; + + /** The step performance capturing size limit. */ + protected String stepPerformanceCapturingSizeLimit; + + /** The steps fields cache. */ + protected Map stepsFieldsCache; + + /** The loop cache. */ + protected Map loopCache; + + /** The log channel interface. 
*/ + protected LogChannelInterface log; + + /** The list of StepChangeListeners */ + protected List stepChangeListeners; + + protected byte[] keyForSessionKey; + boolean isKeyPrivate; + private ArrayList missingTrans; + + /** + * The TransformationType enum describes the various types of transformations in terms of execution, including Normal, + * Serial Single-Threaded, and Single-Threaded. + */ + public enum TransformationType { + + /** A normal transformation. */ + Normal( "Normal", BaseMessages.getString( PKG, "TransMeta.TransformationType.Normal" ) ), + + /** A serial single-threaded transformation. */ + SerialSingleThreaded( "SerialSingleThreaded", BaseMessages.getString( + PKG, "TransMeta.TransformationType.SerialSingleThreaded" ) ), + + /** A single-threaded transformation. */ + SingleThreaded( "SingleThreaded", BaseMessages + .getString( PKG, "TransMeta.TransformationType.SingleThreaded" ) ); + + /** The code corresponding to the transformation type. */ + private String code; + + /** The description of the transformation type. */ + private String description; + + /** + * Instantiates a new transformation type. + * + * @param code + * the code + * @param description + * the description + */ + private TransformationType( String code, String description ) { + this.code = code; + this.description = description; + } + + /** + * Gets the code corresponding to the transformation type. + * + * @return the code + */ + public String getCode() { + return code; + } + + /** + * Gets the description of the transformation type. + * + * @return the description + */ + public String getDescription() { + return description; + } + + /** + * Gets the transformation type by code. + * + * @param transTypeCode + * the trans type code + * @return the transformation type by code + */ + public static TransformationType getTransformationTypeByCode( String transTypeCode ) { + if ( transTypeCode != null ) { + for ( TransformationType type : values() ) { + if ( type.code.equalsIgnoreCase( transTypeCode ) ) { + return type; + } + } + } + return Normal; + } + + /** + * Gets the transformation types descriptions. + * + * @return the transformation types descriptions + */ + public static String[] getTransformationTypesDescriptions() { + String[] desc = new String[values().length]; + for ( int i = 0; i < values().length; i++ ) { + desc[i] = values()[i].getDescription(); + } + return desc; + } + } + + /** The transformation type. */ + protected TransformationType transformationType; + + // ////////////////////////////////////////////////////////////////////////// + + /** A list of localized strings corresponding to string descriptions of the undo/redo actions. */ + public static final String[] desc_type_undo = { + "", + BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoChange" ), + BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoNew" ), + BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoDelete" ), + BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoPosition" ) }; + + /** A constant specifying the tag value for the XML node of the transformation information. */ + protected static final String XML_TAG_INFO = "info"; + + /** A constant specifying the tag value for the XML node of the order of steps. */ + public static final String XML_TAG_ORDER = "order"; + + /** A constant specifying the tag value for the XML node of the notes. */ + public static final String XML_TAG_NOTEPADS = "notepads"; + + /** A constant specifying the tag value for the XML node of the transformation parameters. 
*/ + public static final String XML_TAG_PARAMETERS = "parameters"; + + /** A constant specifying the tag value for the XML node of the transformation dependencies. */ + protected static final String XML_TAG_DEPENDENCIES = "dependencies"; + + /** A constant specifying the tag value for the XML node of the transformation's partition schemas. */ + public static final String XML_TAG_PARTITIONSCHEMAS = "partitionschemas"; + + /** A constant specifying the tag value for the XML node of the slave servers. */ + public static final String XML_TAG_SLAVESERVERS = "slaveservers"; + + /** A constant specifying the tag value for the XML node of the cluster schemas. */ + public static final String XML_TAG_CLUSTERSCHEMAS = "clusterschemas"; + + /** A constant specifying the tag value for the XML node of the steps' error-handling information. */ + public static final String XML_TAG_STEP_ERROR_HANDLING = "step_error_handling"; + + /** + * Builds a new empty transformation. The transformation will have default logging capability and no variables, and + * all internal meta-data is cleared to defaults. + */ + public TransMeta() { + clear(); + initializeVariablesFrom( null ); + } + + /** + * Builds a new empty transformation with a set of variables to inherit from. + * + * @param parent + * the variable space to inherit from + */ + public TransMeta( VariableSpace parent ) { + clear(); + initializeVariablesFrom( parent ); + } + + public TransMeta( String filename, String name ) { + clear(); + setFilename( filename ); + this.name = name; + initializeVariablesFrom( null ); + } + + /** + * Constructs a new transformation specifying the filename, name and arguments. + * + * @param filename + * The filename of the transformation + * @param name + * The name of the transformation + * @param arguments + * The arguments as Strings + * @deprecated passing in arguments (a runtime argument) into the metadata is deprecated, pass it to Trans + */ + @Deprecated + public TransMeta( String filename, String name, String[] arguments ) { + clear(); + setFilename( filename ); + this.name = name; + this.arguments = arguments; + initializeVariablesFrom( null ); + } + + /** + * Compares two transformation on name, filename, repository directory, etc. + * The comparison algorithm is as follows:
+ *
+ * 1. The first transformation's filename is checked first; if it has none, the transformation comes from a
+ *    repository. If the second transformation does not come from a repository, -1 is returned.
+ * 2. If the transformations are both from a repository, the transformations' names are compared. If the first
+ *    transformation has no name and the second one does, a -1 is returned. If the opposite is true, a 1 is returned.
+ * 3. If they both have names they are compared as strings. If the result is non-zero it is returned. Otherwise the
+ *    repository directories are compared using the same technique of checking empty values and then performing a
+ *    string comparison, returning any non-zero result.
+ * 4. If the names and directories are equal, the object revision strings are compared using the same technique of
+ *    checking empty values and then performing a string comparison, this time ultimately returning the result of the
+ *    string compare.
+ * 5. If the first transformation does not come from a repository and the second one does, a 1 is returned. Otherwise
+ *    the transformation names and filenames are subsequently compared using the same technique of checking empty
+ *    values and then performing a string comparison, ultimately returning the result of the filename string comparison.
+ * + * @param t1 + * the first transformation to compare + * @param t2 + * the second transformation to compare + * @return 0 if the two transformations are equal, 1 or -1 depending on the values (see description above) + * + */ + public int compare( TransMeta t1, TransMeta t2 ) { + // If we don't have a filename, the transformation comes from a repository + // + if ( Const.isEmpty( t1.getFilename() ) ) { + + if ( !Const.isEmpty( t2.getFilename() ) ) { + return -1; + } + + // First compare names... + // + if ( Const.isEmpty( t1.getName() ) && !Const.isEmpty( t2.getName() ) ) { + return -1; + } + if ( !Const.isEmpty( t1.getName() ) && Const.isEmpty( t2.getName() ) ) { + return 1; + } + int cmpName = t1.getName().compareTo( t2.getName() ); + if ( cmpName != 0 ) { + return cmpName; + } + + // Same name, compare Repository directory... + // + int cmpDirectory = t1.getRepositoryDirectory().getPath().compareTo( t2.getRepositoryDirectory().getPath() ); + if ( cmpDirectory != 0 ) { + return cmpDirectory; + } + + // Same name, same directory, compare versions + // + if ( t1.getObjectRevision() != null && t2.getObjectRevision() == null ) { + return 1; + } + if ( t1.getObjectRevision() == null && t2.getObjectRevision() != null ) { + return -1; + } + if ( t1.getObjectRevision() == null && t2.getObjectRevision() == null ) { + return 0; + } + return t1.getObjectRevision().getName().compareTo( t2.getObjectRevision().getName() ); + + } else { + if ( Const.isEmpty( t2.getFilename() ) ) { + return 1; + } + + // First compare names + // + if ( Const.isEmpty( t1.getName() ) && !Const.isEmpty( t2.getName() ) ) { + return -1; + } + if ( !Const.isEmpty( t1.getName() ) && Const.isEmpty( t2.getName() ) ) { + return 1; + } + int cmpName = t1.getName().compareTo( t2.getName() ); + if ( cmpName != 0 ) { + return cmpName; + } + + // Same name, compare filenames... + // + return t1.getFilename().compareTo( t2.getFilename() ); + } + } + + /** + * Compares this transformation's meta-data to the specified transformation's meta-data. This method simply calls + * compare(this, o) + * + * @param o + * the o + * @return the int + * @see #compare(TransMeta, TransMeta) + * @see java.lang.Comparable#compareTo(java.lang.Object) + */ + public int compareTo( TransMeta o ) { + return compare( this, o ); + } + + /** + * Checks whether this transformation's meta-data object is equal to the specified object. If the specified object is + * not an instance of TransMeta, false is returned. Otherwise the method returns whether a call to compare() indicates + * equality (i.e. compare(this, (TransMeta)obj)==0). + * + * @param obj + * the obj + * @return true, if successful + * @see #compare(TransMeta, TransMeta) + * @see java.lang.Object#equals(java.lang.Object) + */ + public boolean equals( Object obj ) { + if ( !( obj instanceof TransMeta ) ) { + return false; + } + + return compare( this, (TransMeta) obj ) == 0; + } + + /** + * Clones the transformation meta-data object. + * + * @return a clone of the transformation meta-data object + * @see java.lang.Object#clone() + */ + @Override + public Object clone() { + return realClone( true ); + } + + /** + * Perform a real clone of the transformation meta-data object, including cloning all lists and copying all values. If + * the doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied + * fields will be cleared. 
+ * + * @param doClear + * Whether to clear all of the clone's data before copying from the source object + * @return a real clone of the calling object + */ + public Object realClone( boolean doClear ) { + + try { + TransMeta transMeta = (TransMeta) super.clone(); + if ( doClear ) { + transMeta.clear(); + } else { + // Clear out the things we're replacing below + transMeta.databases = new ArrayList(); + transMeta.steps = new ArrayList(); + transMeta.hops = new ArrayList(); + transMeta.notes = new ArrayList(); + transMeta.dependencies = new ArrayList(); + transMeta.partitionSchemas = new ArrayList(); + transMeta.slaveServers = new ArrayList(); + transMeta.clusterSchemas = new ArrayList(); + transMeta.namedParams = new NamedParamsDefault(); + transMeta.stepChangeListeners = new ArrayList(); + } + for ( DatabaseMeta db : databases ) { + transMeta.addDatabase( (DatabaseMeta) db.clone() ); + } + for ( StepMeta step : steps ) { + transMeta.addStep( (StepMeta) step.clone() ); + } + for ( TransHopMeta hop : hops ) { + transMeta.addTransHop( (TransHopMeta) hop.clone() ); + } + for ( NotePadMeta note : notes ) { + transMeta.addNote( (NotePadMeta) note.clone() ); + } + for ( TransDependency dep : dependencies ) { + transMeta.addDependency( (TransDependency) dep.clone() ); + } + for ( SlaveServer slave : slaveServers ) { + transMeta.getSlaveServers().add( (SlaveServer) slave.clone() ); + } + for ( ClusterSchema schema : clusterSchemas ) { + transMeta.getClusterSchemas().add( schema.clone() ); + } + for ( PartitionSchema schema : partitionSchemas ) { + transMeta.getPartitionSchemas().add( (PartitionSchema) schema.clone() ); + } + for ( String key : listParameters() ) { + transMeta.addParameterDefinition( key, getParameterDefault( key ), getParameterDescription( key ) ); + } + + return transMeta; + } catch ( Exception e ) { + e.printStackTrace(); + return null; + } + } + + /** + * Clears the transformation's meta-data, including the lists of databases, steps, hops, notes, dependencies, + * partition schemas, slave servers, and cluster schemas. Logging information and timeouts are reset to defaults, and + * recent connection info is cleared. + */ + @Override + public void clear() { + setObjectId( null ); + steps = new ArrayList(); + hops = new ArrayList(); + dependencies = new ArrayList(); + partitionSchemas = new ArrayList(); + clusterSchemas = new ArrayList(); + stepChangeListeners = new ArrayList(); + + slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); + + trans_status = -1; + trans_version = null; + + transLogTable = TransLogTable.getDefault( this, this, steps ); + performanceLogTable = PerformanceLogTable.getDefault( this, this ); + stepLogTable = StepLogTable.getDefault( this, this ); + metricsLogTable = MetricsLogTable.getDefault( this, this ); + + sizeRowset = Const.ROWS_IN_ROWSET; + sleepTimeEmpty = Const.TIMEOUT_GET_MILLIS; + sleepTimeFull = Const.TIMEOUT_PUT_MILLIS; + + maxDateConnection = null; + maxDateTable = null; + maxDateField = null; + maxDateOffset = 0.0; + + maxDateDifference = 0.0; + + undo = new ArrayList(); + max_undo = Const.MAX_UNDO; + undo_position = -1; + + counters = new Hashtable(); + resultRows = null; + + super.clear(); + + // LOAD THE DATABASE CACHE! 
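+ // DBCache.getInstance() appears to hand back a shared, process-wide cache, so the assignment below
+ // only (re)acquires the reference; it presumably does not discard entries cached by other transformations.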
+ dbCache = DBCache.getInstance(); + + resultRows = new ArrayList(); + resultFiles = new ArrayList(); + + feedbackShown = true; + feedbackSize = Const.ROWS_UPDATE; + + // Thread priority: + // - set to false in version 2.5.0 + // - re-enabling in version 3.0.1 to prevent excessive locking (PDI-491) + // + usingThreadPriorityManagment = true; + + // The performance monitoring options + // + capturingStepPerformanceSnapShots = false; + stepPerformanceCapturingDelay = 1000; // every 1 seconds + stepPerformanceCapturingSizeLimit = "100"; // maximum 100 data points + + stepsFieldsCache = new HashMap(); + loopCache = new HashMap(); + transformationType = TransformationType.Normal; + + log = LogChannel.GENERAL; + } + + /** + * Add a new step to the transformation. Also marks that the transformation's steps have changed. + * + * @param stepMeta + * The meta-data for the step to be added. + */ + public void addStep( StepMeta stepMeta ) { + steps.add( stepMeta ); + stepMeta.setParentTransMeta( this ); + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if ( iface instanceof StepMetaChangeListenerInterface ) { + addStepChangeListener( (StepMetaChangeListenerInterface) iface ); + } + changed_steps = true; + } + + /** + * Add a new step to the transformation if that step didn't exist yet. Otherwise, replace the step. This method also + * marks that the transformation's steps have changed. + * + * @param stepMeta + * The meta-data for the step to be added. + */ + public void addOrReplaceStep( StepMeta stepMeta ) { + int index = steps.indexOf( stepMeta ); + if ( index < 0 ) { + index = steps.add( stepMeta ) ? 0 : index; + } else { + StepMeta previous = getStep( index ); + previous.replaceMeta( stepMeta ); + } + stepMeta.setParentTransMeta( this ); + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if ( index != -1 && iface instanceof StepMetaChangeListenerInterface ) { + addStepChangeListener( index, (StepMetaChangeListenerInterface) iface ); + } + changed_steps = true; + } + + /** + * Add a new hop to the transformation. The hop information (source and target steps, e.g.) should be configured in + * the TransHopMeta object before calling addTransHop(). Also marks that the transformation's hops have changed. + * + * @param hi + * The hop meta-data to be added. + */ + public void addTransHop( TransHopMeta hi ) { + hops.add( hi ); + changed_hops = true; + } + + /** + * Add a new dependency to the transformation. + * + * @param td + * The transformation dependency to be added. + */ + public void addDependency( TransDependency td ) { + dependencies.add( td ); + } + + /** + * Add a new step to the transformation at the specified index. This method sets the step's parent transformation to + * the this transformation, and marks that the transformations' steps have changed. + * + * @param p + * The index into the step list + * @param stepMeta + * The step to be added. + */ + public void addStep( int p, StepMeta stepMeta ) { + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if ( iface instanceof StepMetaChangeListenerInterface ) { + addStepChangeListener( p, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface() ); + } + steps.add( p, stepMeta ); + stepMeta.setParentTransMeta( this ); + changed_steps = true; + } + + /** + * Add a new hop to the transformation on a certain location (i.e. the specified index). Also marks that the + * transformation's hops have changed. + * + * @param p + * the index into the hop list + * @param hi + * The hop to be added. 
+ */ + public void addTransHop( int p, TransHopMeta hi ) { + try { + hops.add( p, hi ); + } catch ( IndexOutOfBoundsException e ) { + hops.add( hi ); + } + changed_hops = true; + } + + /** + * Add a new dependency to the transformation on a certain location (i.e. the specified index). + * + * @param p + * The index into the dependencies list. + * @param td + * The transformation dependency to be added. + */ + public void addDependency( int p, TransDependency td ) { + dependencies.add( p, td ); + } + + /** + * Get a list of defined steps in this transformation. + * + * @return an ArrayList of defined steps. + */ + public List getSteps() { + return steps; + } + + /** + * Retrieves a step on a certain location (i.e. the specified index). + * + * @param i + * The index into the steps list. + * @return The desired step's meta-data. + */ + public StepMeta getStep( int i ) { + return steps.get( i ); + } + + /** + * Retrieves a hop on a certain location (i.e. the specified index). + * + * @param i + * The index into the hops list. + * @return The desired hop's meta-data. + */ + public TransHopMeta getTransHop( int i ) { + return hops.get( i ); + } + + /** + * Retrieves a dependency on a certain location (i.e. the specified index). + * + * @param i + * The index into the dependencies list. + * @return The dependency object. + */ + public TransDependency getDependency( int i ) { + return dependencies.get( i ); + } + + /** + * Removes a step from the transformation on a certain location (i.e. the specified index). Also marks that the + * transformation's steps have changed. + * + * @param i + * The index + */ + public void removeStep( int i ) { + if ( i < 0 || i >= steps.size() ) { + return; + } + + StepMeta removeStep = steps.get( i ); + StepMetaInterface iface = removeStep.getStepMetaInterface(); + if ( iface instanceof StepMetaChangeListenerInterface ) { + removeStepChangeListener( (StepMetaChangeListenerInterface) iface ); + } + + steps.remove( i ); + + if ( removeStep.getStepMetaInterface() instanceof MissingTrans ) { + removeMissingTrans( (MissingTrans) removeStep.getStepMetaInterface() ); + } + + changed_steps = true; + } + + /** + * Removes a hop from the transformation on a certain location (i.e. the specified index). Also marks that the + * transformation's hops have changed. + * + * @param i + * The index into the hops list + */ + public void removeTransHop( int i ) { + if ( i < 0 || i >= hops.size() ) { + return; + } + + hops.remove( i ); + changed_hops = true; + } + + /** + * Removes a hop from the transformation. Also marks that the + * transformation's hops have changed. + * + * @param hop + * The hop to remove from the list of hops + */ + public void removeTransHop( TransHopMeta hop ) { + hops.remove( hop ); + changed_hops = true; + } + + /** + * Removes a dependency from the transformation on a certain location (i.e. the specified index). + * + * @param i + * The location + */ + public void removeDependency( int i ) { + if ( i < 0 || i >= dependencies.size() ) { + return; + } + dependencies.remove( i ); + } + + /** + * Clears all the dependencies from the transformation. + */ + public void removeAllDependencies() { + dependencies.clear(); + } + + /** + * Gets the number of steps in the transformation. + * + * @return The number of steps in the transformation. + */ + public int nrSteps() { + return steps.size(); + } + + /** + * Gets the number of hops in the transformation. + * + * @return The number of hops in the transformation. 
+ */ + public int nrTransHops() { + return hops.size(); + } + + /** + * Gets the number of dependencies in the transformation. + * + * @return The number of dependencies in the transformation. + */ + public int nrDependencies() { + return dependencies.size(); + } + + /** + * Gets the number of stepChangeListeners in the transformation. + * + * @return The number of stepChangeListeners in the transformation. + */ + public int nrStepChangeListeners() { + return stepChangeListeners.size(); + } + + /** + * Changes the content of a step on a certain position. This is accomplished by setting the step's metadata at the + * specified index to the specified meta-data object. The new step's parent transformation is updated to be this + * transformation. + * + * @param i + * The index into the steps list + * @param stepMeta + * The step meta-data to set + */ + public void setStep( int i, StepMeta stepMeta ) { + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if ( iface instanceof StepMetaChangeListenerInterface ) { + addStepChangeListener( i, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface() ); + } + steps.set( i, stepMeta ); + stepMeta.setParentTransMeta( this ); + } + + /** + * Changes the content of a hop on a certain position. This is accomplished by setting the hop's metadata at the + * specified index to the specified meta-data object. + * + * @param i + * The index into the hops list + * @param hi + * The hop meta-data to set + */ + public void setTransHop( int i, TransHopMeta hi ) { + hops.set( i, hi ); + } + + /** + * Gets the list of used steps, which are the steps that are connected by hops. + * + * @return a list with all the used steps + */ + public List getUsedSteps() { + List list = new ArrayList(); + + for ( StepMeta stepMeta : steps ) { + if ( isStepUsedInTransHops( stepMeta ) ) { + list.add( stepMeta ); + } + } + + return list; + } + + /** + * Searches the list of steps for a step with a certain name. + * + * @param name + * The name of the step to look for + * @return The step information or null if no nothing was found. + */ + public StepMeta findStep( String name ) { + return findStep( name, null ); + } + + /** + * Searches the list of steps for a step with a certain name while excluding one step. + * + * @param name + * The name of the step to look for + * @param exclude + * The step information to exclude. + * @return The step information or null if nothing was found. + */ + public StepMeta findStep( String name, StepMeta exclude ) { + if ( name == null ) { + return null; + } + + int excl = -1; + if ( exclude != null ) { + excl = indexOfStep( exclude ); + } + + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + if ( i != excl && stepMeta.getName().equalsIgnoreCase( name ) ) { + return stepMeta; + } + } + return null; + } + + /** + * Searches the list of hops for a hop with a certain name. + * + * @param name + * The name of the hop to look for + * @return The hop information or null if nothing was found. + */ + public TransHopMeta findTransHop( String name ) { + int i; + + for ( i = 0; i < nrTransHops(); i++ ) { + TransHopMeta hi = getTransHop( i ); + if ( hi.toString().equalsIgnoreCase( name ) ) { + return hi; + } + } + return null; + } + + /** + * Search all hops for a hop where a certain step is at the start. + * + * @param fromstep + * The step at the start of the hop. + * @return The hop or null if no hop was found. 
+ */ + public TransHopMeta findTransHopFrom( StepMeta fromstep ) { + int i; + for ( i = 0; i < nrTransHops(); i++ ) { + TransHopMeta hi = getTransHop( i ); + if ( hi.getFromStep() != null && hi.getFromStep().equals( fromstep ) ) // return the first + { + return hi; + } + } + return null; + } + + /** + * Find a certain hop in the transformation. + * + * @param hi + * The hop information to look for. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHop( TransHopMeta hi ) { + return findTransHop( hi.getFromStep(), hi.getToStep() ); + } + + /** + * Search all hops for a hop where a certain step is at the start and another is at the end. + * + * @param from + * The step at the start of the hop. + * @param to + * The step at the end of the hop. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHop( StepMeta from, StepMeta to ) { + return findTransHop( from, to, false ); + } + + /** + * Search all hops for a hop where a certain step is at the start and another is at the end. + * + * @param from + * The step at the start of the hop. + * @param to + * The step at the end of the hop. + * @param disabledToo + * the disabled too + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHop( StepMeta from, StepMeta to, boolean disabledToo ) { + for ( int i = 0; i < nrTransHops(); i++ ) { + TransHopMeta hi = getTransHop( i ); + if ( hi.isEnabled() || disabledToo ) { + if ( hi.getFromStep() != null && hi.getToStep() != null && hi.getFromStep().equals( from ) && hi.getToStep() + .equals( to ) ) { + return hi; + } + } + } + return null; + } + + /** + * Search all hops for a hop where a certain step is at the end. + * + * @param tostep + * The step at the end of the hop. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHopTo( StepMeta tostep ) { + int i; + for ( i = 0; i < nrTransHops(); i++ ) { + TransHopMeta hi = getTransHop( i ); + if ( hi.getToStep() != null && hi.getToStep().equals( tostep ) ) // Return the first! + { + return hi; + } + } + return null; + } + + /** + * Determines whether or not a certain step is informative. This means that the previous step is sending information + * to this step, but only informative. This means that this step is using the information to process the actual stream + * of data. We use this in StreamLookup, TableInput and other types of steps. + * + * @param this_step + * The step that is receiving information. + * @param prev_step + * The step that is sending information + * @return true if prev_step if informative for this_step. + */ + public boolean isStepInformative( StepMeta this_step, StepMeta prev_step ) { + String[] infoSteps = this_step.getStepMetaInterface().getStepIOMeta().getInfoStepnames(); + if ( infoSteps == null ) { + return false; + } + for ( int i = 0; i < infoSteps.length; i++ ) { + if ( prev_step.getName().equalsIgnoreCase( infoSteps[i] ) ) { + return true; + } + } + + return false; + } + + /** + * Counts the number of previous steps for a step name. + * + * @param stepname + * The name of the step to start from + * @return The number of preceding steps. + * @deprecated + */ + @Deprecated + public int findNrPrevSteps( String stepname ) { + return findNrPrevSteps( findStep( stepname ), false ); + } + + /** + * Counts the number of previous steps for a step name taking into account whether or not they are informational. 
+ * + * @param stepname + * The name of the step to start from + * @param info + * true if only the informational steps are desired, false otherwise + * @return The number of preceding steps. + * @deprecated + */ + @Deprecated + public int findNrPrevSteps( String stepname, boolean info ) { + return findNrPrevSteps( findStep( stepname ), info ); + } + + /** + * Find the number of steps that precede the indicated step. + * + * @param stepMeta + * The source step + * + * @return The number of preceding steps found. + */ + public int findNrPrevSteps( StepMeta stepMeta ) { + return findNrPrevSteps( stepMeta, false ); + } + + /** + * Find the previous step on a certain location (i.e. the specified index). + * + * @param stepname + * The source step name + * @param nr + * the index into the step list + * + * @return The preceding step found. + * @deprecated + */ + @Deprecated + public StepMeta findPrevStep( String stepname, int nr ) { + return findPrevStep( findStep( stepname ), nr ); + } + + /** + * Find the previous step on a certain location taking into account the steps being informational or not. + * + * @param stepname + * The name of the step + * @param nr + * The index into the step list + * @param info + * true if only the informational steps are desired, false otherwise + * @return The step information + * @deprecated + */ + @Deprecated + public StepMeta findPrevStep( String stepname, int nr, boolean info ) { + return findPrevStep( findStep( stepname ), nr, info ); + } + + /** + * Find the previous step on a certain location (i.e. the specified index). + * + * @param stepMeta + * The source step information + * @param nr + * the index into the hops list + * + * @return The preceding step found. + */ + public StepMeta findPrevStep( StepMeta stepMeta, int nr ) { + return findPrevStep( stepMeta, nr, false ); + } + + /** + * Count the number of previous steps on a certain location taking into account the steps being informational or not. + * + * @param stepMeta + * The name of the step + * @param info + * true if only the informational steps are desired, false otherwise + * @return The number of preceding steps + * @deprecated please use method findPreviousSteps + */ + @Deprecated + public int findNrPrevSteps( StepMeta stepMeta, boolean info ) { + int count = 0; + int i; + + for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + TransHopMeta hi = getTransHop( i ); + if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { + // Check if this previous step isn't informative (StreamValueLookup) + // We don't want fields from this stream to show up! + if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) { + count++; + } + } + } + return count; + } + + /** + * Find the previous step on a certain location taking into account the steps being informational or not. + * + * @param stepMeta + * The step + * @param nr + * The index into the hops list + * @param info + * true if we only want the informational steps. 
+ * @return The preceding step information + * @deprecated please use method findPreviousSteps + */ + @Deprecated + public StepMeta findPrevStep( StepMeta stepMeta, int nr, boolean info ) { + int count = 0; + int i; + + for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { + if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) { + if ( count == nr ) { + return hi.getFromStep(); + } + count++; + } + } + } + return null; + } + + /** + * Get the list of previous steps for a certain reference step. This includes the info steps. + * + * @param stepMeta + * The reference step + * @return The list of the preceding steps, including the info steps. + */ + public List findPreviousSteps( StepMeta stepMeta ) { + return findPreviousSteps( stepMeta, true ); + } + + /** + * Get the previous steps on a certain location taking into account the steps being informational or not. + * + * @param stepMeta + * The name of the step + * @param info + * true if we only want the informational steps. + * @return The list of the preceding steps + */ + public List findPreviousSteps( StepMeta stepMeta, boolean info ) { + List previousSteps = new ArrayList(); + + for ( TransHopMeta hi : hops ) { + if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { + // Check if this previous step isn't informative (StreamValueLookup) + // We don't want fields from this stream to show up! + if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) { + previousSteps.add( hi.getFromStep() ); + } + } + } + return previousSteps; + } + + /** + * Get the informational steps for a certain step. An informational step is a step that provides information for + * lookups, etc. + * + * @param stepMeta + * The name of the step + * @return An array of the informational steps found + */ + public StepMeta[] getInfoStep( StepMeta stepMeta ) { + String[] infoStepName = stepMeta.getStepMetaInterface().getStepIOMeta().getInfoStepnames(); + if ( infoStepName == null ) { + return null; + } + + StepMeta[] infoStep = new StepMeta[infoStepName.length]; + for ( int i = 0; i < infoStep.length; i++ ) { + infoStep[i] = findStep( infoStepName[i] ); + } + + return infoStep; + } + + /** + * Find the the number of informational steps for a certain step. + * + * @param stepMeta + * The step + * @return The number of informational steps found. + */ + public int findNrInfoSteps( StepMeta stepMeta ) { + if ( stepMeta == null ) { + return 0; + } + + int count = 0; + + for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi == null || hi.getToStep() == null ) { + log.logError( BaseMessages.getString( PKG, "TransMeta.Log.DestinationOfHopCannotBeNull" ) ); + } + if ( hi != null && hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { + // Check if this previous step isn't informative (StreamValueLookup) + // We don't want fields from this stream to show up! + if ( isStepInformative( stepMeta, hi.getFromStep() ) ) { + count++; + } + } + } + return count; + } + + /** + * Find the informational fields coming from an informational step into the step specified. + * + * @param stepname + * The name of the step + * @return A row containing fields with origin. 
+ * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getPrevInfoFields( String stepname ) throws KettleStepException { + return getPrevInfoFields( findStep( stepname ) ); + } + + /** + * Find the informational fields coming from an informational step into the step specified. + * + * @param stepMeta + * The receiving step + * @return A row containing fields with origin. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getPrevInfoFields( StepMeta stepMeta ) throws KettleStepException { + RowMetaInterface row = new RowMeta(); + + for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { + StepMeta infoStep = hi.getFromStep(); + if ( isStepInformative( stepMeta, infoStep ) ) { + row = getPrevStepFields( infoStep ); + getThisStepFields( infoStep, stepMeta, row ); + return row; + } + } + } + return row; + } + + /** + * Find the number of succeeding steps for a certain originating step. + * + * @param stepMeta + * The originating step + * @return The number of succeeding steps. + * @deprecated just get the next steps as an array + */ + @Deprecated + public int findNrNextSteps( StepMeta stepMeta ) { + int count = 0; + int i; + for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { + count++; + } + } + return count; + } + + /** + * Find the succeeding step at a location for an originating step. + * + * @param stepMeta + * The originating step + * @param nr + * The location + * @return The step found. + * @deprecated just get the next steps as an array + */ + @Deprecated + public StepMeta findNextStep( StepMeta stepMeta, int nr ) { + int count = 0; + int i; + + for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { + if ( count == nr ) { + return hi.getToStep(); + } + count++; + } + } + return null; + } + + /** + * Retrieve an array of preceding steps for a certain destination step. This includes the info steps. + * + * @param stepMeta + * The destination step + * @return An array containing the preceding steps. + */ + public StepMeta[] getPrevSteps( StepMeta stepMeta ) { + List prevSteps = new ArrayList(); + for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hopMeta = getTransHop( i ); + if ( hopMeta.isEnabled() && hopMeta.getToStep().equals( stepMeta ) ) { + prevSteps.add( hopMeta.getFromStep() ); + } + } + + return prevSteps.toArray( new StepMeta[prevSteps.size()] ); + } + + /** + * Retrieve an array of succeeding step names for a certain originating step name. + * + * @param stepname + * The originating step name + * @return An array of succeeding step names + */ + public String[] getPrevStepNames( String stepname ) { + return getPrevStepNames( findStep( stepname ) ); + } + + /** + * Retrieve an array of preceding steps for a certain destination step. + * + * @param stepMeta + * The destination step + * @return an array of preceding step names. 
+ */ + public String[] getPrevStepNames( StepMeta stepMeta ) { + StepMeta[] prevStepMetas = getPrevSteps( stepMeta ); + String[] retval = new String[prevStepMetas.length]; + for ( int x = 0; x < prevStepMetas.length; x++ ) { + retval[x] = prevStepMetas[x].getName(); + } + + return retval; + } + + /** + * Retrieve an array of succeeding steps for a certain originating step. + * + * @param stepMeta + * The originating step + * @return an array of succeeding steps. + * @deprecated use findNextSteps instead + */ + @Deprecated + public StepMeta[] getNextSteps( StepMeta stepMeta ) { + List nextSteps = new ArrayList(); + for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { + nextSteps.add( hi.getToStep() ); + } + } + + return nextSteps.toArray( new StepMeta[nextSteps.size()] ); + } + + /** + * Retrieve a list of succeeding steps for a certain originating step. + * + * @param stepMeta + * The originating step + * @return an array of succeeding steps. + */ + public List findNextSteps( StepMeta stepMeta ) { + List nextSteps = new ArrayList(); + for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + + TransHopMeta hi = getTransHop( i ); + if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { + nextSteps.add( hi.getToStep() ); + } + } + + return nextSteps; + } + + /** + * Retrieve an array of succeeding step names for a certain originating step. + * + * @param stepMeta + * The originating step + * @return an array of succeeding step names. + */ + public String[] getNextStepNames( StepMeta stepMeta ) { + StepMeta[] nextStepMeta = getNextSteps( stepMeta ); + String[] retval = new String[nextStepMeta.length]; + for ( int x = 0; x < nextStepMeta.length; x++ ) { + retval[x] = nextStepMeta[x].getName(); + } + + return retval; + } + + /** + * Find the step that is located on a certain point on the canvas, taking into account the icon size. + * + * @param x + * the x-coordinate of the point queried + * @param y + * the y-coordinate of the point queried + * @param iconsize + * the iconsize + * @return The step information if a step is located at the point. Otherwise, if no step was found: null. + */ + public StepMeta getStep( int x, int y, int iconsize ) { + int i, s; + s = steps.size(); + for ( i = s - 1; i >= 0; i-- ) // Back to front because drawing goes from start to end + { + StepMeta stepMeta = steps.get( i ); + if ( partOfTransHop( stepMeta ) || stepMeta.isDrawn() ) // Only consider steps from active or inactive hops! + { + Point p = stepMeta.getLocation(); + if ( p != null ) { + if ( x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize + 20 ) { + return stepMeta; + } + } + } + } + return null; + } + + /** + * Determines whether or not a certain step is part of a hop. + * + * @param stepMeta + * The step queried + * @return true if the step is part of a hop. + */ + public boolean partOfTransHop( StepMeta stepMeta ) { + int i; + for ( i = 0; i < nrTransHops(); i++ ) { + TransHopMeta hi = getTransHop( i ); + if ( hi.getFromStep() == null || hi.getToStep() == null ) { + return false; + } + if ( hi.getFromStep().equals( stepMeta ) || hi.getToStep().equals( stepMeta ) ) { + return true; + } + } + return false; + } + + /** + * Returns the fields that are emitted by a certain step name. + * + * @param stepname + * The stepname of the step to be queried. + * @return A row containing the fields emitted. 
+ * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getStepFields( String stepname ) throws KettleStepException { + StepMeta stepMeta = findStep( stepname ); + if ( stepMeta != null ) { + return getStepFields( stepMeta ); + } else { + return null; + } + } + + /** + * Returns the fields that are emitted by a certain step. + * + * @param stepMeta + * The step to be queried. + * @return A row containing the fields emitted. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getStepFields( StepMeta stepMeta ) throws KettleStepException { + return getStepFields( stepMeta, null ); + } + + /** + * Gets the fields for each of the specified steps and merges them into a single set + * + * @param stepMeta + * the step meta + * @return an interface to the step fields + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getStepFields( StepMeta[] stepMeta ) throws KettleStepException { + RowMetaInterface fields = new RowMeta(); + + for ( int i = 0; i < stepMeta.length; i++ ) { + RowMetaInterface flds = getStepFields( stepMeta[i] ); + if ( flds != null ) { + fields.mergeRowMeta( flds ); + } + } + return fields; + } + + /** + * Returns the fields that are emitted by a certain step. + * + * @param stepMeta + * The step to be queried. + * @param monitor + * The progress monitor for progress dialog. (null if not used!) + * @return A row containing the fields emitted. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getStepFields( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleStepException { + clearStepFieldsCachce(); + setRepositoryOnMappingSteps(); + return getStepFields( stepMeta, null, monitor ); + } + + /** + * Returns the fields that are emitted by a certain step. + * + * @param stepMeta + * The step to be queried. + * @param targetStep + * the target step + * @param monitor + * The progress monitor for progress dialog. (null if not used!) + * @return A row containing the fields emitted. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getStepFields( StepMeta stepMeta, StepMeta targetStep, ProgressMonitorListener monitor ) throws KettleStepException { + RowMetaInterface row = new RowMeta(); + + if ( stepMeta == null ) { + return row; + } + + String fromToCacheEntry = stepMeta.getName() + ( targetStep != null ? ( "-" + targetStep.getName() ) : "" ); + RowMetaInterface rowMeta = stepsFieldsCache.get( fromToCacheEntry ); + if ( rowMeta != null ) { + return rowMeta; + } + + // See if the step is sending ERROR rows to the specified target step. + // + if ( targetStep != null && stepMeta.isSendingErrorRowsToStep( targetStep ) ) { + // The error rows are the same as the input rows for + // the step but with the selected error fields added + // + row = getPrevStepFields( stepMeta ); + + // Add to this the error fields... + StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta(); + row.addRowMeta( stepErrorMeta.getErrorFields() ); + + // Store this row in the cache + // + stepsFieldsCache.put( fromToCacheEntry, row ); + + return row; + } + + // Resume the regular program... 
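+    // Usage sketch (illustration only): resolving the output row layout of a named step. Results
+    // are cached per "<stepName>" or "<stepName>-<targetStepName>" key, so a step sending error
+    // rows to a target gets its own cache entry. The .ktr path and step name below are
+    // hypothetical, and KettleEnvironment.init() is assumed to have been called first.
+    //
+    //   TransMeta transMeta = new TransMeta( "/tmp/sample.ktr" );
+    //   RowMetaInterface fields = transMeta.getStepFields( "Table input" );
+    //   for ( int i = 0; i < fields.size(); i++ ) {
+    //     ValueMetaInterface v = fields.getValueMeta( i );
+    //     System.out.println( v.getName() + " (from " + v.getOrigin() + ")" );
+    //   }
+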
+ + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), + String.valueOf( findNrPrevSteps( stepMeta ) ) ) ); + } + int nrPrevious = findNrPrevSteps( stepMeta ); + for ( int i = 0; i < nrPrevious; i++ ) { + StepMeta prevStepMeta = findPrevStep( stepMeta, i ); + + if ( monitor != null ) { + monitor.subTask( + BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName() ) ); + } + + RowMetaInterface add = getStepFields( prevStepMeta, stepMeta, monitor ); + if ( add == null ) { + add = new RowMeta(); + } + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FoundFieldsToAdd" ) + add.toString() ); + } + if ( i == 0 ) { + row.addRowMeta( add ); + } else { + // See if the add fields are not already in the row + for ( int x = 0; x < add.size(); x++ ) { + ValueMetaInterface v = add.getValueMeta( x ); + ValueMetaInterface s = row.searchValueMeta( v.getName() ); + if ( s == null ) { + row.addValueMeta( v ); + } + } + } + } + + if ( nrPrevious == 0 && stepMeta.getRemoteInputSteps().size() > 0 ) { + // Also check the remote input steps (clustering) + // Typically, if there are any, row is still empty at this point + // We'll also be at a starting point in the transformation + // + for ( RemoteStep remoteStep : stepMeta.getRemoteInputSteps() ) { + RowMetaInterface inputFields = remoteStep.getRowMeta(); + for ( ValueMetaInterface inputField : inputFields.getValueMetaList() ) { + if ( row.searchValueMeta( inputField.getName() ) == null ) { + row.addValueMeta( inputField ); + } + } + } + } + + // Finally, see if we need to add/modify/delete fields with this step "name" + rowMeta = getThisStepFields( stepMeta, targetStep, row, monitor ); + + // Store this row in the cache + // + stepsFieldsCache.put( fromToCacheEntry, rowMeta ); + + return rowMeta; + } + + /** + * Find the fields that are entering a step with a certain name. + * + * @param stepname + * The name of the step queried + * @return A row containing the fields (w/ origin) entering the step + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getPrevStepFields( String stepname ) throws KettleStepException { + clearStepFieldsCachce(); + return getPrevStepFields( findStep( stepname ) ); + } + + /** + * Find the fields that are entering a certain step. + * + * @param stepMeta + * The step queried + * @return A row containing the fields (w/ origin) entering the step + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getPrevStepFields( StepMeta stepMeta ) throws KettleStepException { + clearStepFieldsCachce(); + return getPrevStepFields( stepMeta, null ); + } + + /** + * Find the fields that are entering a certain step. + * + * @param stepMeta + * The step queried + * @param monitor + * The progress monitor for progress dialog. (null if not used!) 
+ * @return A row containing the fields (w/ origin) entering the step + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getPrevStepFields( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleStepException { + clearStepFieldsCachce(); + + RowMetaInterface row = new RowMeta(); + + if ( stepMeta == null ) { + return null; + } + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), + String.valueOf( findNrPrevSteps( stepMeta ) ) ) ); + } + for ( int i = 0; i < findNrPrevSteps( stepMeta ); i++ ) { + StepMeta prevStepMeta = findPrevStep( stepMeta, i ); + + if ( monitor != null ) { + monitor.subTask( + BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName() ) ); + } + + RowMetaInterface add = getStepFields( prevStepMeta, stepMeta, monitor ); + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FoundFieldsToAdd2" ) + add.toString() ); + } + if ( i == 0 ) { + // we expect all input streams to be of the same layout! + + row.addRowMeta( add ); // recursive! + } else { + // See if the add fields are not already in the row + for ( int x = 0; x < add.size(); x++ ) { + ValueMetaInterface v = add.getValueMeta( x ); + ValueMetaInterface s = row.searchValueMeta( v.getName() ); + if ( s == null ) { + row.addValueMeta( v ); + } + } + } + } + return row; + } + + /** + * Return the fields that are emitted by a step with a certain name. + * + * @param stepname + * The name of the step that's being queried. + * @param row + * A row containing the input fields or an empty row if no input is required. + * @return A Row containing the output fields. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getThisStepFields( String stepname, RowMetaInterface row ) throws KettleStepException { + return getThisStepFields( findStep( stepname ), null, row ); + } + + /** + * Returns the fields that are emitted by a step. + * + * @param stepMeta + * : The StepMeta object that's being queried + * @param nextStep + * : if non-null this is the next step that's call back to ask what's being sent + * @param row + * : A row containing the input fields or an empty row if no input is required. + * @return A Row containing the output fields. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getThisStepFields( StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row ) throws KettleStepException { + return getThisStepFields( stepMeta, nextStep, row, null ); + } + + /** + * Returns the fields that are emitted by a step. + * + * @param stepMeta + * : The StepMeta object that's being queried + * @param nextStep + * : if non-null this is the next step that's call back to ask what's being sent + * @param row + * : A row containing the input fields or an empty row if no input is required. + * @param monitor + * the monitor + * @return A Row containing the output fields. + * @throws KettleStepException + * the kettle step exception + */ + public RowMetaInterface getThisStepFields( StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row, + ProgressMonitorListener monitor ) throws KettleStepException { + // Then this one. 
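+    // Step-side sketch (illustration only; "result_field" is a hypothetical output field): this
+    // method delegates to the step's own StepMetaInterface.getFields(...), which mutates the
+    // incoming row, while the before/after comparison below keeps compatibility with plugins
+    // that only implement the deprecated five-argument signature.
+    //
+    //   public void getFields( RowMetaInterface inputRowMeta, String name, RowMetaInterface[] info,
+    //       StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore )
+    //       throws KettleStepException {
+    //     ValueMetaInterface added = new ValueMetaString( "result_field" );
+    //     added.setOrigin( name );
+    //     inputRowMeta.addValueMeta( added );
+    //   }
+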
+ if ( log.isDebug() ) { + log.logDebug( BaseMessages + .getString( PKG, "TransMeta.Log.GettingFieldsFromStep", stepMeta.getName(), stepMeta.getStepID() ) ); + } + String name = stepMeta.getName(); + + if ( monitor != null ) { + monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingFieldsFromStepTask.Title", name ) ); + } + + StepMetaInterface stepint = stepMeta.getStepMetaInterface(); + RowMetaInterface[] inform = null; + StepMeta[] lu = getInfoStep( stepMeta ); + if ( Const.isEmpty( lu ) ) { + inform = new RowMetaInterface[] { stepint.getTableFields(), }; + } else { + inform = new RowMetaInterface[lu.length]; + for ( int i = 0; i < lu.length; i++ ) { + inform[i] = getStepFields( lu[i] ); + } + } + + setRepositoryOnMappingSteps(); + + // Go get the fields... + // + RowMetaInterface before = row.clone(); + compatibleGetStepFields( stepint, row, name, inform, nextStep, this ); + if ( !isSomethingDifferentInRow( before, row ) ) { + stepint.getFields( before, name, inform, nextStep, this, repository, metaStore ); + // pass the clone object to prevent from spoiling data by other steps + row = before; + } + + return row; + } + + @SuppressWarnings( "deprecation" ) + private void compatibleGetStepFields( StepMetaInterface stepint, RowMetaInterface row, String name, + RowMetaInterface[] inform, StepMeta nextStep, VariableSpace space ) throws KettleStepException { + + stepint.getFields( row, name, inform, nextStep, space ); + + } + + private boolean isSomethingDifferentInRow( RowMetaInterface before, RowMetaInterface after ) { + if ( before.size() != after.size() ) { + return true; + } + for ( int i = 0; i < before.size(); i++ ) { + ValueMetaInterface beforeValueMeta = before.getValueMeta( i ); + ValueMetaInterface afterValueMeta = after.getValueMeta( i ); + if ( stringsDifferent( beforeValueMeta.getName(), afterValueMeta.getName() ) ) { + return true; + } + if ( beforeValueMeta.getType() != afterValueMeta.getType() ) { + return true; + } + if ( beforeValueMeta.getLength() != afterValueMeta.getLength() ) { + return true; + } + if ( beforeValueMeta.getPrecision() != afterValueMeta.getPrecision() ) { + return true; + } + if ( stringsDifferent( beforeValueMeta.getOrigin(), afterValueMeta.getOrigin() ) ) { + return true; + } + if ( stringsDifferent( beforeValueMeta.getComments(), afterValueMeta.getComments() ) ) { + return true; + } + if ( stringsDifferent( beforeValueMeta.getConversionMask(), afterValueMeta.getConversionMask() ) ) { + return true; + } + if ( stringsDifferent( beforeValueMeta.getStringEncoding(), afterValueMeta.getStringEncoding() ) ) { + return true; + } + if ( stringsDifferent( beforeValueMeta.getDecimalSymbol(), afterValueMeta.getDecimalSymbol() ) ) { + return true; + } + if ( stringsDifferent( beforeValueMeta.getGroupingSymbol(), afterValueMeta.getGroupingSymbol() ) ) { + return true; + } + } + return false; + } + + private boolean stringsDifferent( String one, String two ) { + if ( one == null && two == null ) { + return false; + } + if ( one == null && two != null ) { + return true; + } + if ( one != null && two == null ) { + return true; + } + return !one.equals( two ); + } + + /** + * Set the Repository object on the Mapping step That way the mapping step can determine the output fields for + * repository hosted mappings... This is the exception to the rule so we don't pass this through the getFields() + * method. TODO: figure out a way to make this more generic. 
+ */ + private void setRepositoryOnMappingSteps() { + + for ( StepMeta step : steps ) { + if ( step.getStepMetaInterface() instanceof MappingMeta ) { + ( (MappingMeta) step.getStepMetaInterface() ).setRepository( repository ); + ( (MappingMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); + } + if ( step.getStepMetaInterface() instanceof SingleThreaderMeta ) { + ( (SingleThreaderMeta) step.getStepMetaInterface() ).setRepository( repository ); + ( (SingleThreaderMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); + } + if ( step.getStepMetaInterface() instanceof JobExecutorMeta ) { + ( (JobExecutorMeta) step.getStepMetaInterface() ).setRepository( repository ); + ( (JobExecutorMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); + } + if ( step.getStepMetaInterface() instanceof TransExecutorMeta ) { + ( (TransExecutorMeta) step.getStepMetaInterface() ).setRepository( repository ); + ( (TransExecutorMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); + } + } + } + + /** + * Checks if the transformation is using the specified partition schema. + * + * @param partitionSchema + * the partition schema + * @return true if the transformation is using the partition schema, false otherwise + */ + public boolean isUsingPartitionSchema( PartitionSchema partitionSchema ) { + // Loop over all steps and see if the partition schema is used. + for ( int i = 0; i < nrSteps(); i++ ) { + StepPartitioningMeta stepPartitioningMeta = getStep( i ).getStepPartitioningMeta(); + if ( stepPartitioningMeta != null ) { + PartitionSchema check = stepPartitioningMeta.getPartitionSchema(); + if ( check != null && check.equals( partitionSchema ) ) { + return true; + } + } + } + return false; + } + + /** + * Checks if the transformation is using a cluster schema. + * + * @return true if a cluster schema is used on one or more steps in this transformation, false otherwise + */ + public boolean isUsingAClusterSchema() { + return isUsingClusterSchema( null ); + } + + /** + * Checks if the transformation is using the specified cluster schema. + * + * @param clusterSchema + * the cluster schema to check + * @return true if the specified cluster schema is used on one or more steps in this transformation + */ + public boolean isUsingClusterSchema( ClusterSchema clusterSchema ) { + // Loop over all steps and see if the partition schema is used. + for ( int i = 0; i < nrSteps(); i++ ) { + ClusterSchema check = getStep( i ).getClusterSchema(); + if ( check != null && ( clusterSchema == null || check.equals( clusterSchema ) ) ) { + return true; + } + } + return false; + } + + /** + * Checks if the transformation is using the specified slave server. + * + * @param slaveServer + * the slave server + * @return true if the transformation is using the slave server, false otherwise + * @throws KettleException + * if any errors occur while checking for the slave server + */ + public boolean isUsingSlaveServer( SlaveServer slaveServer ) throws KettleException { + // Loop over all steps and see if the slave server is used. + for ( int i = 0; i < nrSteps(); i++ ) { + ClusterSchema clusterSchema = getStep( i ).getClusterSchema(); + if ( clusterSchema != null ) { + for ( SlaveServer check : clusterSchema.getSlaveServers() ) { + if ( check.equals( slaveServer ) ) { + return true; + } + } + return true; + } + } + return false; + } + + /** + * Checks if the transformation is referenced by a repository. 
+ * + * @return true if the transformation is referenced by a repository, false otherwise + */ + public boolean isRepReference() { + return isRepReference( getFilename(), this.getName() ); + } + + /** + * Checks if the transformation is referenced by a file. If the transformation is not referenced by a repository, it + * is assumed to be referenced by a file. + * + * @return true if the transformation is referenced by a file, false otherwise + * @see #isRepReference() + */ + public boolean isFileReference() { + return !isRepReference( getFilename(), this.getName() ); + } + + /** + * Checks (using the exact filename and transformation name) if the transformation is referenced by a repository. If + * referenced by a repository, the exact filename should be empty and the exact transformation name should be + * non-empty. + * + * @param exactFilename + * the exact filename + * @param exactTransname + * the exact transformation name + * @return true if the transformation is referenced by a repository, false otherwise + */ + public static boolean isRepReference( String exactFilename, String exactTransname ) { + return Const.isEmpty( exactFilename ) && !Const.isEmpty( exactTransname ); + } + + /** + * Checks (using the exact filename and transformation name) if the transformation is referenced by a file. If + * referenced by a repository, the exact filename should be non-empty and the exact transformation name should be + * empty. + * + * @param exactFilename + * the exact filename + * @param exactTransname + * the exact transformation name + * @return true if the transformation is referenced by a file, false otherwise + * @see #isRepReference(String, String) + */ + public static boolean isFileReference( String exactFilename, String exactTransname ) { + return !isRepReference( exactFilename, exactTransname ); + } + + /** + * Finds the location (index) of the specified hop. + * + * @param hi + * The hop queried + * @return The location of the hop, or -1 if nothing was found. + */ + public int indexOfTransHop( TransHopMeta hi ) { + return hops.indexOf( hi ); + } + + /** + * Finds the location (index) of the specified step. + * + * @param stepMeta + * The step queried + * @return The location of the step, or -1 if nothing was found. + */ + public int indexOfStep( StepMeta stepMeta ) { + return steps.indexOf( stepMeta ); + } + + /** + * Gets the file type. For TransMeta, this returns a value corresponding to Transformation + * + * @return the file type + * @see org.pentaho.di.core.EngineMetaInterface#getFileType() + */ + public String getFileType() { + return LastUsedFile.FILE_TYPE_TRANSFORMATION; + } + + /** + * Gets the transformation filter names. + * + * @return the filter names + * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames() + */ + public String[] getFilterNames() { + return Const.getTransformationFilterNames(); + } + + /** + * Gets the transformation filter extensions. For TransMeta, this method returns the value of + * {@link Const#STRING_TRANS_FILTER_EXT} + * + * @return the filter extensions + * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions() + */ + public String[] getFilterExtensions() { + return Const.STRING_TRANS_FILTER_EXT; + } + + /** + * Gets the default extension for a transformation. 
For TransMeta, this method returns the value of + * {@link Const#STRING_TRANS_DEFAULT_EXT} + * + * @return the default extension + * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension() + */ + public String getDefaultExtension() { + return Const.STRING_TRANS_DEFAULT_EXT; + } + + /** + * Gets the XML representation of this transformation. + * + * @return the XML representation of this transformation + * @throws KettleException + * if any errors occur during generation of the XML + * @see org.pentaho.di.core.xml.XMLInterface#getXML() + */ + public String getXML() throws KettleException { + return getXML( true, true, true, true, true ); + } + + /** + * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster, + * or partition information as specified by the parameters + * + * @param includeSteps + * whether to include step data + * @param includeDatabase + * whether to include database data + * @param includeSlaves + * whether to include slave server data + * @param includeClusters + * whether to include cluster data + * @param includePartitions + * whether to include partition data + * @return the XML representation of this transformation + * @throws KettleException + * if any errors occur during generation of the XML + */ + public String getXML( boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters, + boolean includePartitions ) throws KettleException { + Props props = null; + if ( Props.isInitialized() ) { + props = Props.getInstance(); + } + + StringBuilder retval = new StringBuilder( 800 ); + + retval.append( XMLHandler.openTag( XML_TAG ) ).append( Const.CR ); + + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_INFO ) ).append( Const.CR ); + + retval.append( " " ).append( XMLHandler.addTagValue( "name", name ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "description", description ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "extended_description", extendedDescription ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "trans_version", trans_version ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "trans_type", transformationType.getCode() ) ); + + if ( trans_status >= 0 ) { + retval.append( " " ).append( XMLHandler.addTagValue( "trans_status", trans_status ) ); + } + retval.append( " " ).append( XMLHandler.addTagValue( "directory", + directory != null ? 
directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR ) ); + + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); + String[] parameters = listParameters(); + for ( int idx = 0; idx < parameters.length; idx++ ) { + retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ).append( Const.CR ); + retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[idx] ) ); + retval.append( " " ) + .append( XMLHandler.addTagValue( "default_value", getParameterDefault( parameters[idx] ) ) ); + retval.append( " " ) + .append( XMLHandler.addTagValue( "description", getParameterDescription( parameters[idx] ) ) ); + retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ).append( Const.CR ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); + + retval.append( " " ).append( Const.CR ); + + // Add the metadata for the various logging tables + // + retval.append( transLogTable.getXML() ); + retval.append( performanceLogTable.getXML() ); + retval.append( channelLogTable.getXML() ); + retval.append( stepLogTable.getXML() ); + retval.append( metricsLogTable.getXML() ); + + retval.append( " " ).append( Const.CR ); + retval.append( " " ).append( Const.CR ); + retval.append( " " ) + .append( XMLHandler.addTagValue( "connection", maxDateConnection == null ? "" : maxDateConnection.getName() ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "table", maxDateTable ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "field", maxDateField ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "offset", maxDateOffset ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "maxdiff", maxDateDifference ) ); + retval.append( " " ).append( Const.CR ); + + retval.append( " " ).append( XMLHandler.addTagValue( "size_rowset", sizeRowset ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "sleep_time_empty", sleepTimeEmpty ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "sleep_time_full", sleepTimeFull ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "unique_connections", usingUniqueConnections ) ); + + retval.append( " " ).append( XMLHandler.addTagValue( "feedback_shown", feedbackShown ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "feedback_size", feedbackSize ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "using_thread_priorities", usingThreadPriorityManagment ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "shared_objects_file", sharedObjectsFile ) ); + + // Performance monitoring + // + retval.append( " " ) + .append( XMLHandler.addTagValue( "capture_step_performance", capturingStepPerformanceSnapShots ) ); + retval.append( " " ) + .append( XMLHandler.addTagValue( "step_performance_capturing_delay", stepPerformanceCapturingDelay ) ); + retval.append( " " ) + .append( XMLHandler.addTagValue( "step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit ) ); + + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_DEPENDENCIES ) ).append( Const.CR ); + for ( int i = 0; i < nrDependencies(); i++ ) { + TransDependency td = getDependency( i ); + retval.append( td.getXML() ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_DEPENDENCIES ) ).append( Const.CR ); + + // The partitioning schemas... 
+ // + if ( includePartitions ) { + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARTITIONSCHEMAS ) ).append( Const.CR ); + for ( int i = 0; i < partitionSchemas.size(); i++ ) { + PartitionSchema partitionSchema = partitionSchemas.get( i ); + retval.append( partitionSchema.getXML() ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARTITIONSCHEMAS ) ).append( Const.CR ); + } + // The slave servers... + // + if ( includeSlaves ) { + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); + for ( int i = 0; i < slaveServers.size(); i++ ) { + SlaveServer slaveServer = slaveServers.get( i ); + retval.append( " " ).append( slaveServer.getXML() ).append( Const.CR ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); + } + + // The cluster schemas... + // + if ( includeClusters ) { + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_CLUSTERSCHEMAS ) ).append( Const.CR ); + for ( int i = 0; i < clusterSchemas.size(); i++ ) { + ClusterSchema clusterSchema = clusterSchemas.get( i ); + retval.append( clusterSchema.getXML() ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_CLUSTERSCHEMAS ) ).append( Const.CR ); + } + + retval.append( " " ).append( XMLHandler.addTagValue( "created_user", createdUser ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "created_date", XMLHandler.date2string( createdDate ) ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "modified_user", modifiedUser ) ); + retval.append( " " ).append( XMLHandler.addTagValue( "modified_date", XMLHandler.date2string( modifiedDate ) ) ); + + try { + retval.append( " " ).append( XMLHandler.addTagValue( "key_for_session_key", keyForSessionKey ) ); + } catch ( Exception ex ) { + log.logError( "Unable to decode key", ex ); + } + retval.append( " " ).append( XMLHandler.addTagValue( "is_key_private", isKeyPrivate ) ); + + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_INFO ) ).append( Const.CR ); + + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_NOTEPADS ) ).append( Const.CR ); + if ( notes != null ) { + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + retval.append( ni.getXML() ); + } + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_NOTEPADS ) ).append( Const.CR ); + + // The database connections... + if ( includeDatabase ) { + for ( int i = 0; i < nrDatabases(); i++ ) { + DatabaseMeta dbMeta = getDatabase( i ); + if ( props != null && props.areOnlyUsedConnectionsSavedToXML() ) { + if ( isDatabaseConnectionUsed( dbMeta ) ) { + retval.append( dbMeta.getXML() ); + } + } else { + retval.append( dbMeta.getXML() ); + } + } + } + + if ( includeSteps ) { + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_ORDER ) ).append( Const.CR ); + for ( int i = 0; i < nrTransHops(); i++ ) { + TransHopMeta transHopMeta = getTransHop( i ); + retval.append( transHopMeta.getXML() ).append( Const.CR ); + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_ORDER ) ).append( Const.CR ); + + /* The steps... 
*/ + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + if ( stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface ) { + ( (HasRepositoryInterface) stepMeta.getStepMetaInterface() ).setRepository( repository ); + } + retval.append( stepMeta.getXML() ); + } + + /* The error handling metadata on the steps */ + retval.append( " " ).append( XMLHandler.openTag( XML_TAG_STEP_ERROR_HANDLING ) ).append( Const.CR ); + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + + if ( stepMeta.getStepErrorMeta() != null ) { + retval.append( stepMeta.getStepErrorMeta().getXML() ); + } + } + retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_STEP_ERROR_HANDLING ) ).append( Const.CR ); + } + + // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. + retval.append( " " ).append( slaveStepCopyPartitionDistribution.getXML() ); + + // Is this a slave transformation or not? + retval.append( " " ).append( XMLHandler.addTagValue( "slave_transformation", slaveTransformation ) ); + + // Also store the attribute groups + // + retval.append( AttributesUtil.getAttributesXml( attributesMap ) ).append( Const.CR ); + + retval.append( "" ).append( Const.CR ); + + return retval.toString(); + } + + /** + * Parses a file containing the XML that describes the transformation. No default connections are loaded since no + * repository is available at this time. Since the filename is set, internal variables are being set that relate to + * this. + * + * @param fname + * The filename + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname ) throws KettleXMLException, KettleMissingPluginsException { + this( fname, true ); + } + + /** + * Parses a file containing the XML that describes the transformation. No default connections are loaded since no + * repository is available at this time. Since the filename is set, variables are set in the specified variable space + * that relate to this. + * + * @param fname + * The filename + * @param parentVariableSpace + * the parent variable space + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, VariableSpace parentVariableSpace ) throws KettleXMLException, + KettleMissingPluginsException { + this( fname, null, true, parentVariableSpace ); + } + + /** + * Parses a file containing the XML that describes the transformation. No default connections are loaded since no + * repository is available at this time. + * + * @param fname + * The filename + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, boolean setInternalVariables ) throws KettleXMLException, + KettleMissingPluginsException { + this( fname, null, setInternalVariables ); + } + + /** + * Parses a file containing the XML that describes the transformation. 
+ * + * @param fname + * The filename + * @param rep + * The repository to load the default set of connections from, null if no repository is available + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, Repository rep ) throws KettleXMLException, KettleMissingPluginsException { + this( fname, rep, true ); + } + + /** + * Parses a file containing the XML that describes the transformation. + * + * @param fname + * The filename + * @param rep + * The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, Repository rep, boolean setInternalVariables ) throws KettleXMLException, + KettleMissingPluginsException { + this( fname, rep, setInternalVariables, null ); + } + + /** + * Parses a file containing the XML that describes the transformation. + * + * @param fname + * The filename + * @param rep + * The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace ) throws KettleXMLException, KettleMissingPluginsException { + this( fname, rep, setInternalVariables, parentVariableSpace, null ); + } + + /** + * Parses a file containing the XML that describes the transformation. + * + * @param fname + * The filename + * @param rep + * The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @param prompter + * the changed/replace listener or null if there is none + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, + OverwritePrompter prompter ) throws KettleXMLException, KettleMissingPluginsException { + this( fname, null, rep, setInternalVariables, parentVariableSpace, prompter ); + } + + /** + * Parses a file containing the XML that describes the transformation. 
+ * + * @param fname + * The filename + * @param metaStore + * the metadata store to reference (or null if there is none) + * @param rep + * The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @param prompter + * the changed/replace listener or null if there is none + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter ) + throws KettleXMLException, KettleMissingPluginsException { + this.metaStore = metaStore; + this.repository = rep; + + // OK, try to load using the VFS stuff... + Document doc = null; + try { + doc = XMLHandler.loadXMLFile( KettleVFS.getFileObject( fname, parentVariableSpace ) ); + } catch ( KettleFileException e ) { + throw new KettleXMLException( BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname ), e ); + } + + if ( doc != null ) { + // Root node: + Node transnode = XMLHandler.getSubNode( doc, XML_TAG ); + + if ( transnode == null ) { + throw new KettleXMLException( BaseMessages.getString( + PKG, "TransMeta.Exception.NotValidTransformationXML", fname ) ); + } + + // Load from this node... + loadXML( transnode, fname, metaStore, rep, setInternalVariables, parentVariableSpace, prompter ); + + } else { + throw new KettleXMLException( BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname ) ); + } + } + + /** + * Instantiates a new transformation meta-data object. + * + * @param xmlStream + * the XML input stream from which to read the transformation definition + * @param rep + * the repository + * @param setInternalVariables + * whether to set internal variables as a result of the creation + * @param parentVariableSpace + * the parent variable space + * @param prompter + * a GUI component that will prompt the user if the new transformation will overwrite an existing one + * @throws KettleXMLException + * if any errors occur during parsing of the specified stream + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( InputStream xmlStream, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter ) + throws KettleXMLException, KettleMissingPluginsException { + Document doc = XMLHandler.loadXMLFile( xmlStream, null, false, false ); + Node transnode = XMLHandler.getSubNode( doc, XML_TAG ); + loadXML( transnode, rep, setInternalVariables, parentVariableSpace, prompter ); + } + + /** + * Parse a file containing the XML that describes the transformation. Specify a repository to load default list of + * database connections from and to reference in mappings etc. + * + * @param transnode + * The XML node to load from + * @param rep + * the repository to reference. 
+ * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta( Node transnode, Repository rep ) throws KettleXMLException, KettleMissingPluginsException { + loadXML( transnode, rep, false ); + } + + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode + * The XML node to load from + * @param rep + * The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML( Node transnode, Repository rep, boolean setInternalVariables ) throws KettleXMLException, + KettleMissingPluginsException { + loadXML( transnode, rep, setInternalVariables, null ); + } + + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode + * The XML node to load from + * @param rep + * The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML( Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace ) + throws KettleXMLException, KettleMissingPluginsException { + loadXML( transnode, rep, setInternalVariables, parentVariableSpace, null ); + } + + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode + * The XML node to load from + * @param rep + * The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @param prompter + * the changed/replace listener or null if there is none + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML( Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, + OverwritePrompter prompter ) throws KettleXMLException, KettleMissingPluginsException { + loadXML( transnode, null, rep, setInternalVariables, parentVariableSpace, prompter ); + } + + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. 
+ * + * @param transnode + * The XML node to load from + * @param fname + * The filename + * @param rep + * The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @param prompter + * the changed/replace listener or null if there is none + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML( Node transnode, String fname, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter ) + throws KettleXMLException, KettleMissingPluginsException { + loadXML( transnode, fname, null, rep, setInternalVariables, parentVariableSpace, prompter ); + } + + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode + * The XML node to load from + * @param fname + * The filename + * @param rep + * The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables + * true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace + * the parent variable space to use during TransMeta construction + * @param prompter + * the changed/replace listener or null if there is none + * @throws KettleXMLException + * if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException + * in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML( Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter ) + throws KettleXMLException, KettleMissingPluginsException { + + KettleMissingPluginsException + missingPluginsException = + new KettleMissingPluginsException( + BaseMessages.getString( PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception" ) ); + + this.metaStore = metaStore; // Remember this as the primary meta store. + + try { + + Props props = null; + if ( Props.isInitialized() ) { + props = Props.getInstance(); + } + + initializeVariablesFrom( parentVariableSpace ); + + try { + // Clear the transformation + clear(); + + // If we are not using a repository, we are getting the transformation from a file + // Set the filename here so it can be used in variables for ALL aspects of the transformation FIX: PDI-8890 + if ( null == rep ) { + setFilename( fname ); + } + + // Read all the database connections from the repository to make sure that we don't overwrite any there by + // loading from XML. + // + try { + sharedObjectsFile = XMLHandler.getTagValue( transnode, "info", "shared_objects_file" ); + sharedObjects = rep != null ? rep.readTransSharedObjects( this ) : readSharedObjects(); + } catch ( Exception e ) { + log + .logError( BaseMessages.getString( PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString() ) ); + log.logError( Const.getStackTracker( e ) ); + } + + // Load the database connections, slave servers, cluster schemas & partition schemas into this object. 
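+        // Loading-order note: shared objects read just above (shared.xml or the repository) and
+        // elements imported from the metastore take priority over inline copies embedded in the
+        // transformation XML; non-shared duplicates may trigger the OverwritePrompter. Minimal
+        // loading sketch (the file path and variable are hypothetical):
+        //
+        //   Variables parent = new Variables();
+        //   parent.setVariable( "INPUT_DIR", "/data/in" );
+        //   TransMeta meta = new TransMeta( "/tmp/sample.ktr", null, true, parent );
+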
+ // + importFromMetaStore(); + + // Handle connections + int n = XMLHandler.countNodes( transnode, DatabaseMeta.XML_TAG ); + Set privateTransformationDatabases = new HashSet( n ); + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.WeHaveConnections", String.valueOf( n ) ) ); + } + for ( int i = 0; i < n; i++ ) { + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtConnection" ) + i ); + } + Node nodecon = XMLHandler.getSubNodeByNr( transnode, DatabaseMeta.XML_TAG, i ); + + DatabaseMeta dbcon = new DatabaseMeta( nodecon ); + dbcon.shareVariablesWith( this ); + if ( !dbcon.isShared() ) { + privateTransformationDatabases.add( dbcon.getName() ); + } + + DatabaseMeta exist = findDatabase( dbcon.getName() ); + if ( exist == null ) { + addDatabase( dbcon ); + } else { + if ( !exist.isShared() ) // otherwise, we just keep the shared connection. + { + if ( shouldOverwrite( prompter, props, BaseMessages.getString( PKG, + "TransMeta.Message.OverwriteConnectionYN", dbcon.getName() ), BaseMessages.getString( PKG, + "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { + int idx = indexOfDatabase( exist ); + removeDatabase( idx ); + addDatabase( idx, dbcon ); + } + } + } + } + setPrivateDatabases( privateTransformationDatabases ); + + // Read the notes... + Node notepadsnode = XMLHandler.getSubNode( transnode, XML_TAG_NOTEPADS ); + int nrnotes = XMLHandler.countNodes( notepadsnode, NotePadMeta.XML_TAG ); + for ( int i = 0; i < nrnotes; i++ ) { + Node notepadnode = XMLHandler.getSubNodeByNr( notepadsnode, NotePadMeta.XML_TAG, i ); + NotePadMeta ni = new NotePadMeta( notepadnode ); + notes.add( ni ); + } + + // Handle Steps + int s = XMLHandler.countNodes( transnode, StepMeta.XML_TAG ); + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.ReadingSteps" ) + s + " steps..." ); + } + for ( int i = 0; i < s; i++ ) { + Node stepnode = XMLHandler.getSubNodeByNr( transnode, StepMeta.XML_TAG, i ); + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtStep" ) + i ); + } + + StepMeta stepMeta = new StepMeta( stepnode, databases, metaStore ); + stepMeta.setParentTransMeta( this ); // for tracing, retain hierarchy + + if ( stepMeta.isMissing() ) { + addMissingTrans( (MissingTrans) stepMeta.getStepMetaInterface() ); + } + // Check if the step exists and if it's a shared step. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + // + StepMeta check = findStep( stepMeta.getName() ); + if ( check != null ) { + if ( !check.isShared() ) { + // Don't overwrite shared objects + + addOrReplaceStep( stepMeta ); + } else { + check.setDraw( stepMeta.isDrawn() ); // Just keep the drawn flag and location + check.setLocation( stepMeta.getLocation() ); + } + } else { + addStep( stepMeta ); // simply add it. + } + } + + // Read the error handling code of the steps... 
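+        // Missing-plugin note: steps whose plugin cannot be resolved are kept as MissingTrans
+        // placeholders and collected, then reported at the end of loadXML as a single
+        // KettleMissingPluginsException. Caller-side sketch (the file path is hypothetical):
+        //
+        //   try {
+        //     TransMeta meta = new TransMeta( "/tmp/sample.ktr" );
+        //   } catch ( KettleMissingPluginsException e ) {
+        //     System.err.println( e.getMissingPluginDetailsList().size() + " plugin(s) missing" );
+        //   }
+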
+ // + Node errorHandlingNode = XMLHandler.getSubNode( transnode, XML_TAG_STEP_ERROR_HANDLING ); + int nrErrorHandlers = XMLHandler.countNodes( errorHandlingNode, StepErrorMeta.XML_TAG ); + for ( int i = 0; i < nrErrorHandlers; i++ ) { + Node stepErrorMetaNode = XMLHandler.getSubNodeByNr( errorHandlingNode, StepErrorMeta.XML_TAG, i ); + StepErrorMeta stepErrorMeta = new StepErrorMeta( this, stepErrorMetaNode, steps ); + if ( stepErrorMeta.getSourceStep() != null ) { + stepErrorMeta.getSourceStep().setStepErrorMeta( stepErrorMeta ); // a bit of a trick, I know. + } + } + + // Have all StreamValueLookups, etc. reference the correct source steps... + // + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + StepMetaInterface sii = stepMeta.getStepMetaInterface(); + if ( sii != null ) { + sii.searchInfoAndTargetSteps( steps ); + } + } + + // Handle Hops + // + Node ordernode = XMLHandler.getSubNode( transnode, XML_TAG_ORDER ); + n = XMLHandler.countNodes( ordernode, TransHopMeta.XML_TAG ); + + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.WeHaveHops" ) + n + " hops..." ); + } + for ( int i = 0; i < n; i++ ) { + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtHop" ) + i ); + } + Node hopnode = XMLHandler.getSubNodeByNr( ordernode, TransHopMeta.XML_TAG, i ); + + TransHopMeta hopinf = new TransHopMeta( hopnode, steps ); + addTransHop( hopinf ); + } + + // + // get transformation info: + // + Node infonode = XMLHandler.getSubNode( transnode, XML_TAG_INFO ); + + // Name + // + setName( XMLHandler.getTagValue( infonode, "name" ) ); + + // description + // + description = XMLHandler.getTagValue( infonode, "description" ); + + // extended description + // + extendedDescription = XMLHandler.getTagValue( infonode, "extended_description" ); + + // trans version + // + trans_version = XMLHandler.getTagValue( infonode, "trans_version" ); + + // trans status + // + trans_status = Const.toInt( XMLHandler.getTagValue( infonode, "trans_status" ), -1 ); + + String transTypeCode = XMLHandler.getTagValue( infonode, "trans_type" ); + transformationType = TransformationType.getTransformationTypeByCode( transTypeCode ); + + // Optionally load the repository directory... + // + if ( rep != null ) { + String directoryPath = XMLHandler.getTagValue( infonode, "directory" ); + if ( directoryPath != null ) { + directory = rep.findDirectory( directoryPath ); + if ( directory == null ) { // not found + directory = new RepositoryDirectory(); // The root as default + } + } + } + + // Read logging table information + // + Node logNode = XMLHandler.getSubNode( infonode, "log" ); + if ( logNode != null ) { + + // Backward compatibility... 
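+          // Backward-compatibility note: transformations saved before the dedicated log-table
+          // elements existed store flat tags directly under <log> (read/write/input/output/
+          // update/rejected plus connection, schema and table), which are mapped onto the
+          // TransLogTable fields below; newer files carry a nested TransLogTable.XML_TAG element
+          // instead. Old layout sketch (step and table names are hypothetical):
+          //
+          //   <log>
+          //     <read>Read rows</read>
+          //     <write>Write rows</write>
+          //     <connection>logging_db</connection>
+          //     <table>TRANS_LOG</table>
+          //   </log>
+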
+ // + Node transLogNode = XMLHandler.getSubNode( logNode, TransLogTable.XML_TAG ); + if ( transLogNode == null ) { + // Load the XML + // + transLogTable.findField( TransLogTable.ID.LINES_READ ) + .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "read" ) ) ); + transLogTable.findField( TransLogTable.ID.LINES_WRITTEN ) + .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "write" ) ) ); + transLogTable.findField( TransLogTable.ID.LINES_INPUT ) + .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "input" ) ) ); + transLogTable.findField( TransLogTable.ID.LINES_OUTPUT ) + .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "output" ) ) ); + transLogTable.findField( TransLogTable.ID.LINES_UPDATED ) + .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "update" ) ) ); + transLogTable.findField( TransLogTable.ID.LINES_REJECTED ) + .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "rejected" ) ) ); + + transLogTable.setConnectionName( XMLHandler.getTagValue( infonode, "log", "connection" ) ); + transLogTable.setSchemaName( XMLHandler.getTagValue( infonode, "log", "schema" ) ); + transLogTable.setTableName( XMLHandler.getTagValue( infonode, "log", "table" ) ); + transLogTable.findField( TransLogTable.ID.ID_BATCH ) + .setEnabled( "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "log", "use_batchid" ) ) ); + transLogTable.findField( TransLogTable.ID.LOG_FIELD ) + .setEnabled( "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "log", "USE_LOGFIELD" ) ) ); + transLogTable.setLogSizeLimit( XMLHandler.getTagValue( infonode, "log", "size_limit_lines" ) ); + transLogTable.setLogInterval( XMLHandler.getTagValue( infonode, "log", "interval" ) ); + transLogTable.findField( TransLogTable.ID.CHANNEL_ID ).setEnabled( false ); + transLogTable.findField( TransLogTable.ID.LINES_REJECTED ).setEnabled( false ); + performanceLogTable.setConnectionName( transLogTable.getConnectionName() ); + performanceLogTable.setTableName( XMLHandler.getTagValue( infonode, "log", "step_performance_table" ) ); + } else { + transLogTable.loadXML( transLogNode, databases, steps ); + } + Node perfLogNode = XMLHandler.getSubNode( logNode, PerformanceLogTable.XML_TAG ); + if ( perfLogNode != null ) { + performanceLogTable.loadXML( perfLogNode, databases, steps ); + } + Node channelLogNode = XMLHandler.getSubNode( logNode, ChannelLogTable.XML_TAG ); + if ( channelLogNode != null ) { + channelLogTable.loadXML( channelLogNode, databases, steps ); + } + Node stepLogNode = XMLHandler.getSubNode( logNode, StepLogTable.XML_TAG ); + if ( stepLogNode != null ) { + stepLogTable.loadXML( stepLogNode, databases, steps ); + } + Node metricsLogNode = XMLHandler.getSubNode( logNode, MetricsLogTable.XML_TAG ); + if ( metricsLogNode != null ) { + metricsLogTable.loadXML( metricsLogNode, databases, steps ); + } + } + + // Maxdate range options... + String maxdatcon = XMLHandler.getTagValue( infonode, "maxdate", "connection" ); + maxDateConnection = findDatabase( maxdatcon ); + maxDateTable = XMLHandler.getTagValue( infonode, "maxdate", "table" ); + maxDateField = XMLHandler.getTagValue( infonode, "maxdate", "field" ); + String offset = XMLHandler.getTagValue( infonode, "maxdate", "offset" ); + maxDateOffset = Const.toDouble( offset, 0.0 ); + String mdiff = XMLHandler.getTagValue( infonode, "maxdate", "maxdiff" ); + maxDateDifference = Const.toDouble( mdiff, 0.0 ); + + // Check the dependencies as far as dates are concerned... 
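+        // Date-range note: the <maxdate> block parsed above and the <dependency> entries parsed
+        // below feed the transformation's date-range calculation; each dependency names a database
+        // field whose maximum date is also taken into account. Shape sketch (connection, table and
+        // field names are hypothetical):
+        //
+        //   <maxdate>
+        //     <connection>warehouse_db</connection>
+        //     <table>sales</table>
+        //     <field>updated_at</field>
+        //     <offset>0.0</offset>
+        //     <maxdiff>0.0</maxdiff>
+        //   </maxdate>
+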
+ // We calculate BEFORE we run the MAX of these dates + // If the date is larger then enddate, startdate is set to MIN_DATE + // + Node depsNode = XMLHandler.getSubNode( infonode, XML_TAG_DEPENDENCIES ); + int nrDeps = XMLHandler.countNodes( depsNode, TransDependency.XML_TAG ); + + for ( int i = 0; i < nrDeps; i++ ) { + Node depNode = XMLHandler.getSubNodeByNr( depsNode, TransDependency.XML_TAG, i ); + + TransDependency transDependency = new TransDependency( depNode, databases ); + if ( transDependency.getDatabase() != null && transDependency.getFieldname() != null ) { + addDependency( transDependency ); + } + } + + // Read the named parameters. + Node paramsNode = XMLHandler.getSubNode( infonode, XML_TAG_PARAMETERS ); + int nrParams = XMLHandler.countNodes( paramsNode, "parameter" ); + + for ( int i = 0; i < nrParams; i++ ) { + Node paramNode = XMLHandler.getSubNodeByNr( paramsNode, "parameter", i ); + + String paramName = XMLHandler.getTagValue( paramNode, "name" ); + String defaultValue = XMLHandler.getTagValue( paramNode, "default_value" ); + String descr = XMLHandler.getTagValue( paramNode, "description" ); + + addParameterDefinition( paramName, defaultValue, descr ); + } + + // Read the partitioning schemas + // + Node partSchemasNode = XMLHandler.getSubNode( infonode, XML_TAG_PARTITIONSCHEMAS ); + int nrPartSchemas = XMLHandler.countNodes( partSchemasNode, PartitionSchema.XML_TAG ); + for ( int i = 0; i < nrPartSchemas; i++ ) { + Node partSchemaNode = XMLHandler.getSubNodeByNr( partSchemasNode, PartitionSchema.XML_TAG, i ); + PartitionSchema partitionSchema = new PartitionSchema( partSchemaNode ); + + // Check if the step exists and if it's a shared step. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + // + PartitionSchema check = findPartitionSchema( partitionSchema.getName() ); + if ( check != null ) { + if ( !check.isShared() ) { + // we don't overwrite shared objects. + if ( shouldOverwrite( prompter, props, BaseMessages + .getString( PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName() ), + BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { + addOrReplacePartitionSchema( partitionSchema ); + } + } + } else { + partitionSchemas.add( partitionSchema ); + } + + } + + // Have all step partitioning meta-data reference the correct schemas that we just loaded + // + for ( int i = 0; i < nrSteps(); i++ ) { + StepPartitioningMeta stepPartitioningMeta = getStep( i ).getStepPartitioningMeta(); + if ( stepPartitioningMeta != null ) { + stepPartitioningMeta.setPartitionSchemaAfterLoading( partitionSchemas ); + } + StepPartitioningMeta targetStepPartitioningMeta = getStep( i ).getTargetStepPartitioningMeta(); + if ( targetStepPartitioningMeta != null ) { + targetStepPartitioningMeta.setPartitionSchemaAfterLoading( partitionSchemas ); + } + } + + // Read the slave servers... + // + Node slaveServersNode = XMLHandler.getSubNode( infonode, XML_TAG_SLAVESERVERS ); + int nrSlaveServers = XMLHandler.countNodes( slaveServersNode, SlaveServer.XML_TAG ); + for ( int i = 0; i < nrSlaveServers; i++ ) { + Node slaveServerNode = XMLHandler.getSubNodeByNr( slaveServersNode, SlaveServer.XML_TAG, i ); + SlaveServer slaveServer = new SlaveServer( slaveServerNode ); + slaveServer.shareVariablesWith( this ); + + // Check if the object exists and if it's a shared object. + // If so, then we will keep the shared version, not this one. 
+ // The stored XML is only for backup purposes. + SlaveServer check = findSlaveServer( slaveServer.getName() ); + if ( check != null ) { + if ( !check.isShared() ) { + // we don't overwrite shared objects. + if ( shouldOverwrite( prompter, props, + BaseMessages.getString( PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName() ), + BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { + addOrReplaceSlaveServer( slaveServer ); + } + } + } else { + slaveServers.add( slaveServer ); + } + } + + // Read the cluster schemas + // + Node clusterSchemasNode = XMLHandler.getSubNode( infonode, XML_TAG_CLUSTERSCHEMAS ); + int nrClusterSchemas = XMLHandler.countNodes( clusterSchemasNode, ClusterSchema.XML_TAG ); + for ( int i = 0; i < nrClusterSchemas; i++ ) { + Node clusterSchemaNode = XMLHandler.getSubNodeByNr( clusterSchemasNode, ClusterSchema.XML_TAG, i ); + ClusterSchema clusterSchema = new ClusterSchema( clusterSchemaNode, slaveServers ); + clusterSchema.shareVariablesWith( this ); + + // Check if the object exists and if it's a shared object. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + ClusterSchema check = findClusterSchema( clusterSchema.getName() ); + if ( check != null ) { + if ( !check.isShared() ) { + // we don't overwrite shared objects. + if ( shouldOverwrite( prompter, props, + BaseMessages.getString( PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName() ), + BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { + addOrReplaceClusterSchema( clusterSchema ); + } + } + } else { + clusterSchemas.add( clusterSchema ); + } + } + + // Have all step clustering schema meta-data reference the correct cluster schemas that we just loaded + // + for ( int i = 0; i < nrSteps(); i++ ) { + getStep( i ).setClusterSchemaAfterLoading( clusterSchemas ); + } + + String srowset = XMLHandler.getTagValue( infonode, "size_rowset" ); + sizeRowset = Const.toInt( srowset, Const.ROWS_IN_ROWSET ); + sleepTimeEmpty = + Const.toInt( XMLHandler.getTagValue( infonode, "sleep_time_empty" ), Const.TIMEOUT_GET_MILLIS ); + sleepTimeFull = Const.toInt( XMLHandler.getTagValue( infonode, "sleep_time_full" ), Const.TIMEOUT_PUT_MILLIS ); + usingUniqueConnections = "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "unique_connections" ) ); + + feedbackShown = !"N".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "feedback_shown" ) ); + feedbackSize = Const.toInt( XMLHandler.getTagValue( infonode, "feedback_size" ), Const.ROWS_UPDATE ); + usingThreadPriorityManagment = + !"N".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "using_thread_priorities" ) ); + + // Performance monitoring for steps... 
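+ // The capture flag, the snapshot sampling delay (default 1000 ms) and the size limit below are all read from the <info> node.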
+ // + capturingStepPerformanceSnapShots = + "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "capture_step_performance" ) ); + stepPerformanceCapturingDelay = + Const.toLong( XMLHandler.getTagValue( infonode, "step_performance_capturing_delay" ), 1000 ); + stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue( infonode, "step_performance_capturing_size_limit" ); + + // Created user/date + createdUser = XMLHandler.getTagValue( infonode, "created_user" ); + String createDate = XMLHandler.getTagValue( infonode, "created_date" ); + if ( createDate != null ) { + createdDate = XMLHandler.stringToDate( createDate ); + } + + // Changed user/date + modifiedUser = XMLHandler.getTagValue( infonode, "modified_user" ); + String modDate = XMLHandler.getTagValue( infonode, "modified_date" ); + if ( modDate != null ) { + modifiedDate = XMLHandler.stringToDate( modDate ); + } + + Node partitionDistNode = XMLHandler.getSubNode( transnode, SlaveStepCopyPartitionDistribution.XML_TAG ); + if ( partitionDistNode != null ) { + slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution( partitionDistNode ); + } else { + slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); // leave empty + } + + // Is this a slave transformation? + // + slaveTransformation = "Y".equalsIgnoreCase( XMLHandler.getTagValue( transnode, "slave_transformation" ) ); + if ( log.isDebug() ) { + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.NumberOfStepsReaded" ) + nrSteps() ); + log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.NumberOfHopsReaded" ) + nrTransHops() ); + } + sortSteps(); + + // Load the attribute groups map + // + attributesMap = AttributesUtil.loadAttributes( XMLHandler.getSubNode( transnode, AttributesUtil.XML_TAG ) ); + + keyForSessionKey = XMLHandler.stringToBinary( XMLHandler.getTagValue( infonode, "key_for_session_key" ) ); + isKeyPrivate = "Y".equals( XMLHandler.getTagValue( infonode, "is_key_private" ) ); + + } catch ( KettleXMLException xe ) { + throw new KettleXMLException( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorReadingTransformation" ), + xe ); + } catch ( KettleException e ) { + throw new KettleXMLException( e ); + } finally { + initializeVariablesFrom( null ); + if ( setInternalVariables ) { + setInternalKettleVariables(); + } + + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationMetaLoaded.id, this ); + } + } catch ( Exception e ) { + // See if we have missing plugins to report, those take precedence! + // + if ( !missingPluginsException.getMissingPluginDetailsList().isEmpty() ) { + throw missingPluginsException; + } else { + throw new KettleXMLException( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorReadingTransformation" ), + e ); + } + } finally { + if ( !missingPluginsException.getMissingPluginDetailsList().isEmpty() ) { + throw missingPluginsException; + } + } + } + + public byte[] getKey() { + return keyForSessionKey; + } + + public void setKey( byte[] key ) { + this.keyForSessionKey = key; + } + + public boolean isPrivateKey() { + return isKeyPrivate; + } + + public void setPrivateKey( boolean privateKey ) { + this.isKeyPrivate = privateKey; + } + + /** + * Reads the shared objects (steps, connections, etc.). + * + * @return the shared objects + * @throws KettleException + * if any errors occur while reading the shared objects + */ + public SharedObjects readSharedObjects() throws KettleException { + // Extract the shared steps, connections, etc. 
using the SharedObjects class + // + String soFile = environmentSubstitute( sharedObjectsFile ); + SharedObjects sharedObjects = new SharedObjects( soFile ); + if ( sharedObjects.getObjectsMap().isEmpty() ) { + log.logDetailed( BaseMessages.getString( PKG, "TransMeta.Log.EmptySharedObjectsFile", soFile ) ); + } + + // First read the databases... + // We read databases & slaves first because there might be dependencies that need to be resolved. + // + for ( SharedObjectInterface object : sharedObjects.getObjectsMap().values() ) { + if ( object instanceof DatabaseMeta ) { + DatabaseMeta databaseMeta = (DatabaseMeta) object; + databaseMeta.shareVariablesWith( this ); + addOrReplaceDatabase( databaseMeta ); + } else if ( object instanceof SlaveServer ) { + SlaveServer slaveServer = (SlaveServer) object; + slaveServer.shareVariablesWith( this ); + addOrReplaceSlaveServer( slaveServer ); + } else if ( object instanceof StepMeta ) { + StepMeta stepMeta = (StepMeta) object; + addOrReplaceStep( stepMeta ); + } else if ( object instanceof PartitionSchema ) { + PartitionSchema partitionSchema = (PartitionSchema) object; + addOrReplacePartitionSchema( partitionSchema ); + } else if ( object instanceof ClusterSchema ) { + ClusterSchema clusterSchema = (ClusterSchema) object; + clusterSchema.shareVariablesWith( this ); + addOrReplaceClusterSchema( clusterSchema ); + } + } + + return sharedObjects; + } + + /** + * Gets a List of all the steps that are used in at least one active hop. These steps will be used to execute the + * transformation. The others will not be executed.
+ * Update 3.0 : we also add those steps that are not linked to another hop, but have at least one remote input or + * output step defined. + * + * @param all + * true if you want to get ALL the steps from the transformation, false otherwise + * @return A List of steps + */ + public List getTransHopSteps( boolean all ) { + List st = new ArrayList(); + int idx; + + for ( int x = 0; x < nrTransHops(); x++ ) { + TransHopMeta hi = getTransHop( x ); + if ( hi.isEnabled() || all ) { + idx = st.indexOf( hi.getFromStep() ); // FROM + if ( idx < 0 ) { + st.add( hi.getFromStep() ); + } + + idx = st.indexOf( hi.getToStep() ); // TO + if ( idx < 0 ) { + st.add( hi.getToStep() ); + } + } + } + + // Also, add the steps that need to be painted, but are not part of a hop + for ( int x = 0; x < nrSteps(); x++ ) { + StepMeta stepMeta = getStep( x ); + if ( stepMeta.isDrawn() && !isStepUsedInTransHops( stepMeta ) ) { + st.add( stepMeta ); + } + if ( !stepMeta.getRemoteInputSteps().isEmpty() || !stepMeta.getRemoteOutputSteps().isEmpty() ) { + if ( !st.contains( stepMeta ) ) { + st.add( stepMeta ); + } + } + } + + return st; + } + + /** + * Checks if a step has been used in a hop or not. + * + * @param stepMeta + * The step queried. + * @return true if a step is used in a hop (active or not), false otherwise + */ + public boolean isStepUsedInTransHops( StepMeta stepMeta ) { + TransHopMeta fr = findTransHopFrom( stepMeta ); + TransHopMeta to = findTransHopTo( stepMeta ); + if ( fr != null || to != null ) { + return true; + } + return false; + } + + /** + * Clears the different changed flags of the transformation. + * + */ + @Override + public void clearChanged() { + changed_steps = false; + changed_hops = false; + + for ( int i = 0; i < nrSteps(); i++ ) { + getStep( i ).setChanged( false ); + if ( getStep( i ).getStepPartitioningMeta() != null ) { + getStep( i ).getStepPartitioningMeta().hasChanged( false ); + } + } + for ( int i = 0; i < nrTransHops(); i++ ) { + getTransHop( i ).setChanged( false ); + } + for ( int i = 0; i < partitionSchemas.size(); i++ ) { + partitionSchemas.get( i ).setChanged( false ); + } + for ( int i = 0; i < clusterSchemas.size(); i++ ) { + clusterSchemas.get( i ).setChanged( false ); + } + + super.clearChanged(); + } + + /** + * Checks whether or not the steps have changed. + * + * @return true if the steps have been changed, false otherwise + */ + public boolean haveStepsChanged() { + if ( changed_steps ) { + return true; + } + + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + if ( stepMeta.hasChanged() ) { + return true; + } + if ( stepMeta.getStepPartitioningMeta() != null && stepMeta.getStepPartitioningMeta().hasChanged() ) { + return true; + } + } + return false; + } + + /** + * Checks whether or not any of the hops have been changed. + * + * @return true if a hop has been changed, false otherwise + */ + public boolean haveHopsChanged() { + if ( changed_hops ) { + return true; + } + + for ( int i = 0; i < nrTransHops(); i++ ) { + TransHopMeta hi = getTransHop( i ); + if ( hi.hasChanged() ) { + return true; + } + } + return false; + } + + /** + * Checks whether or not any of the partitioning schemas have been changed. 
+ * + * @return true if the partitioning schemas have been changed, false otherwise + */ + public boolean havePartitionSchemasChanged() { + for ( int i = 0; i < partitionSchemas.size(); i++ ) { + PartitionSchema ps = partitionSchemas.get( i ); + if ( ps.hasChanged() ) { + return true; + } + } + + return false; + } + + /** + * Checks whether or not any of the clustering schemas have been changed. + * + * @return true if the clustering schemas have been changed, false otherwise + */ + public boolean haveClusterSchemasChanged() { + for ( int i = 0; i < clusterSchemas.size(); i++ ) { + ClusterSchema cs = clusterSchemas.get( i ); + if ( cs.hasChanged() ) { + return true; + } + } + + return false; + } + + /** + * Checks whether or not the transformation has changed. + * + * @return true if the transformation has changed, false otherwise + */ + @Override + public boolean hasChanged() { + if ( super.hasChanged() ) { + return true; + } + if ( haveStepsChanged() ) { + return true; + } + if ( haveHopsChanged() ) { + return true; + } + if ( havePartitionSchemasChanged() ) { + return true; + } + if ( haveClusterSchemasChanged() ) { + return true; + } + + return false; + } + + /** + * See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the + * previous steps. If you keep going backward and find the step, there is a loop. Both the informational and the + * normal steps need to be checked for loops! + * + * @param stepMeta + * The step position to start looking + * + * @return true if a loop has been found, false if no loop is found. + */ + public boolean hasLoop( StepMeta stepMeta ) { + clearLoopCache(); + return hasLoop( stepMeta, null, true ) || hasLoop( stepMeta, null, false ); + } + + /** + * See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the + * previous steps. If you keep going backward and find the original step again, there is a loop. + * + * @param stepMeta + * The step position to start looking + * @param lookup + * The original step when wandering around the transformation. + * @param info + * Check the informational steps or not. + * + * @return true if a loop has been found, false if no loop is found. + */ + private boolean hasLoop( StepMeta stepMeta, StepMeta lookup, boolean info ) { + String + cacheKey = + stepMeta.getName() + " - " + ( lookup != null ? lookup.getName() : "" ) + " - " + ( info ? "true" : "false" ); + Boolean loop = loopCache.get( cacheKey ); + if ( loop != null ) { + return loop.booleanValue(); + } + + boolean hasLoop = false; + + int nr = findNrPrevSteps( stepMeta, info ); + for ( int i = 0; i < nr && !hasLoop; i++ ) { + StepMeta prevStepMeta = findPrevStep( stepMeta, i, info ); + if ( prevStepMeta != null ) { + if ( prevStepMeta.equals( stepMeta ) ) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if ( prevStepMeta.equals( lookup ) ) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if ( hasLoop( prevStepMeta, lookup == null ? stepMeta : lookup, info ) ) { + hasLoop = true; + break; // no need to check more but caching this one below + } + } + } + + // Store in the cache... + // + loopCache.put( cacheKey, Boolean.valueOf( hasLoop ) ); + + return hasLoop; + } + + /** + * Mark all steps in the transformation as selected. 
+ * + */ + public void selectAll() { + int i; + for ( i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + stepMeta.setSelected( true ); + } + for ( i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + ni.setSelected( true ); + } + + setChanged(); + notifyObservers( "refreshGraph" ); + } + + /** + * Clear the selection of all steps. + * + */ + public void unselectAll() { + int i; + for ( i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + stepMeta.setSelected( false ); + } + for ( i = 0; i < nrNotes(); i++ ) { + NotePadMeta ni = getNote( i ); + ni.setSelected( false ); + } + } + + /** + * Get an array of all the selected step locations. + * + * @return The selected step locations. + */ + public Point[] getSelectedStepLocations() { + List points = new ArrayList(); + + for ( StepMeta stepMeta : getSelectedSteps() ) { + Point p = stepMeta.getLocation(); + points.add( new Point( p.x, p.y ) ); // explicit copy of location + } + + return points.toArray( new Point[points.size()] ); + } + + /** + * Get an array of all the selected note locations. + * + * @return The selected note locations. + */ + public Point[] getSelectedNoteLocations() { + List points = new ArrayList(); + + for ( NotePadMeta ni : getSelectedNotes() ) { + Point p = ni.getLocation(); + points.add( new Point( p.x, p.y ) ); // explicit copy of location + } + + return points.toArray( new Point[points.size()] ); + } + + /** + * Gets a list of the selected steps. + * + * @return A list of all the selected steps. + */ + public List getSelectedSteps() { + List selection = new ArrayList(); + for ( StepMeta stepMeta : steps ) { + if ( stepMeta.isSelected() ) { + selection.add( stepMeta ); + } + + } + return selection; + } + + /** + * Gets an array of all the selected step names. + * + * @return An array of all the selected step names. + */ + public String[] getSelectedStepNames() { + List selection = getSelectedSteps(); + String[] retval = new String[selection.size()]; + for ( int i = 0; i < retval.length; i++ ) { + StepMeta stepMeta = selection.get( i ); + retval[i] = stepMeta.getName(); + } + return retval; + } + + /** + * Gets an array of the locations of an array of steps. + * + * @param steps + * An array of steps + * @return an array of the locations of an array of steps + */ + public int[] getStepIndexes( List steps ) { + int[] retval = new int[steps.size()]; + + for ( int i = 0; i < steps.size(); i++ ) { + retval[i] = indexOfStep( steps.get( i ) ); + } + + return retval; + } + + /** + * Gets the maximum size of the canvas by calculating the maximum location of a step. + * + * @return Maximum coordinate of a step in the transformation + (100,100) for safety. + */ + public Point getMaximum() { + int maxx = 0, maxy = 0; + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + Point loc = stepMeta.getLocation(); + if ( loc.x > maxx ) { + maxx = loc.x; + } + if ( loc.y > maxy ) { + maxy = loc.y; + } + } + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta notePadMeta = getNote( i ); + Point loc = notePadMeta.getLocation(); + if ( loc.x + notePadMeta.width > maxx ) { + maxx = loc.x + notePadMeta.width; + } + if ( loc.y + notePadMeta.height > maxy ) { + maxy = loc.y + notePadMeta.height; + } + } + + return new Point( maxx + 100, maxy + 100 ); + } + + /** + * Gets the minimum point on the canvas of a transformation. 
+ * + * @return Minimum coordinate of a step in the transformation + */ + public Point getMinimum() { + int minx = Integer.MAX_VALUE, miny = Integer.MAX_VALUE; + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + Point loc = stepMeta.getLocation(); + if ( loc.x < minx ) { + minx = loc.x; + } + if ( loc.y < miny ) { + miny = loc.y; + } + } + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta notePadMeta = getNote( i ); + Point loc = notePadMeta.getLocation(); + if ( loc.x < minx ) { + minx = loc.x; + } + if ( loc.y < miny ) { + miny = loc.y; + } + } + + if ( minx > BORDER_INDENT && minx != Integer.MAX_VALUE ) { + minx -= BORDER_INDENT; + } else { + minx = 0; + } + if ( miny > BORDER_INDENT && miny != Integer.MAX_VALUE ) { + miny -= BORDER_INDENT; + } else { + miny = 0; + } + + return new Point( minx, miny ); + } + + /** + * Gets the names of all the steps. + * + * @return An array of step names. + */ + public String[] getStepNames() { + String[] retval = new String[nrSteps()]; + + for ( int i = 0; i < nrSteps(); i++ ) { + retval[i] = getStep( i ).getName(); + } + + return retval; + } + + /** + * Gets all the steps as an array. + * + * @return An array of all the steps in the transformation. + */ + public StepMeta[] getStepsArray() { + StepMeta[] retval = new StepMeta[nrSteps()]; + + for ( int i = 0; i < nrSteps(); i++ ) { + retval[i] = getStep( i ); + } + + return retval; + } + + /** + * Looks in the transformation to find a step in a previous location starting somewhere. + * + * @param startStep + * The starting step + * @param stepToFind + * The step to look for backward in the transformation + * @return true if we can find the step in an earlier location in the transformation. + */ + public boolean findPrevious( StepMeta startStep, StepMeta stepToFind ) { + String key = startStep.getName() + " - " + stepToFind.getName(); + Boolean result = loopCache.get( key ); + if ( result != null ) { + return result; + } + + // Normal steps + // + List previousSteps = findPreviousSteps( startStep, false ); + for ( int i = 0; i < previousSteps.size(); i++ ) { + StepMeta stepMeta = previousSteps.get( i ); + if ( stepMeta.equals( stepToFind ) ) { + loopCache.put( key, true ); + return true; + } + + boolean found = findPrevious( stepMeta, stepToFind ); // Look further back in the tree. + if ( found ) { + loopCache.put( key, true ); + return true; + } + } + + // Info steps + List infoSteps = findPreviousSteps( startStep, true ); + for ( int i = 0; i < infoSteps.size(); i++ ) { + StepMeta stepMeta = infoSteps.get( i ); + if ( stepMeta.equals( stepToFind ) ) { + loopCache.put( key, true ); + return true; + } + + boolean found = findPrevious( stepMeta, stepToFind ); // Look further back in the tree. + if ( found ) { + loopCache.put( key, true ); + return true; + } + } + + loopCache.put( key, false ); + return false; + } + + /** + * Puts the steps in alphabetical order. + */ + public void sortSteps() { + try { + Collections.sort( steps ); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorOfSortingSteps" ) + e ); + log.logError( Const.getStackTracker( e ) ); + } + } + + /** + * Sorts all the hops in the transformation. + */ + public void sortHops() { + Collections.sort( hops ); + } + + /** The previous count. */ + private long prevCount; + + /** + * Puts the steps in a more natural order: from start to finish. For the moment, we ignore splits and joins. Splits + * and joins can't be listed sequentially in any case! 
+ * + * @return a map containing all the previous steps per step + */ + public Map> sortStepsNatural() { + long startTime = System.currentTimeMillis(); + + prevCount = 0; + + // First create a map where all the previous steps of another step are kept... + // + final Map> stepMap = new HashMap>(); + + // Also cache the previous steps + // + final Map> previousCache = new HashMap>(); + + // Cache calculation of steps before another + // + Map> beforeCache = new HashMap>(); + + for ( StepMeta stepMeta : steps ) { + // What are the previous steps? (cached version for performance) + // + List prevSteps = previousCache.get( stepMeta ); + if ( prevSteps == null ) { + prevSteps = findPreviousSteps( stepMeta ); + prevCount++; + previousCache.put( stepMeta, prevSteps ); + } + + // Now get the previous steps recursively, store them in the step map + // + for ( StepMeta prev : prevSteps ) { + Map beforePrevMap = updateFillStepMap( previousCache, beforeCache, stepMeta, prev ); + stepMap.put( stepMeta, beforePrevMap ); + + // Store it also in the beforeCache... + // + beforeCache.put( prev, beforePrevMap ); + } + } + + Collections.sort( steps, new Comparator() { + + public int compare( StepMeta o1, StepMeta o2 ) { + + Map beforeMap = stepMap.get( o1 ); + if ( beforeMap != null ) { + if ( beforeMap.get( o2 ) == null ) { + return -1; + } else { + return 1; + } + } else { + return o1.getName().compareToIgnoreCase( o2.getName() ); + } + } + } ); + + long endTime = System.currentTimeMillis(); + log.logBasic( + BaseMessages.getString( PKG, "TransMeta.Log.TimeExecutionStepSort", ( endTime - startTime ), prevCount ) ); + + return stepMap; + } + + /** + * Fills a map with all steps previous to the given step. This method uses a caching technique, so if a map is + * provided that contains the specified previous step, it is immediately returned to avoid unnecessary processing. + * Otherwise, the previous steps are determined and added to the map recursively, and a cache is constructed for later + * use. + * + * @param previousCache + * the previous cache, must be non-null + * @param beforeCache + * the before cache, must be non-null + * @param originStepMeta + * the origin step meta + * @param previousStepMeta + * the previous step meta + * @return the map + */ + private Map updateFillStepMap( Map> previousCache, + Map> beforeCache, StepMeta originStepMeta, StepMeta previousStepMeta ) { + + // See if we have a hash map to store step occurrence (located before the step) + // + Map beforeMap = beforeCache.get( previousStepMeta ); + if ( beforeMap == null ) { + beforeMap = new HashMap(); + } else { + return beforeMap; // Nothing left to do here! + } + + // Store the current previous step in the map + // + beforeMap.put( previousStepMeta, Boolean.TRUE ); + + // Figure out all the previous steps as well, they all need to go in there... + // + List prevSteps = previousCache.get( previousStepMeta ); + if ( prevSteps == null ) { + prevSteps = findPreviousSteps( previousStepMeta ); + prevCount++; + previousCache.put( previousStepMeta, prevSteps ); + } + + // Now, get the previous steps for stepMeta recursively... + // We only do this when the beforeMap is not known yet... + // + for ( StepMeta prev : prevSteps ) { + Map beforePrevMap = updateFillStepMap( previousCache, beforeCache, originStepMeta, prev ); + + // Keep a copy in the cache... + // + beforeCache.put( prev, beforePrevMap ); + + // Also add it to the new map for this step... 
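+ // (beforeMap ends up holding previousStepMeta plus every step that transitively precedes it)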
+ // + beforeMap.putAll( beforePrevMap ); + } + + return beforeMap; + } + + /** + * Sorts the hops in a natural way: from beginning to end. + */ + public void sortHopsNatural() { + // Loop over the hops... + for ( int j = 0; j < nrTransHops(); j++ ) { + // Buble sort: we need to do this several times... + for ( int i = 0; i < nrTransHops() - 1; i++ ) { + TransHopMeta one = getTransHop( i ); + TransHopMeta two = getTransHop( i + 1 ); + + StepMeta a = two.getFromStep(); + StepMeta b = one.getToStep(); + + if ( !findPrevious( a, b ) && !a.equals( b ) ) { + setTransHop( i + 1, one ); + setTransHop( i, two ); + } + } + } + } + + /** + * Determines the impact of the different steps in a transformation on databases, tables and field. + * + * @param impact + * An ArrayList of DatabaseImpact objects. + * @param monitor + * a progress monitor listener to be updated as the transformation is analyzed + * @throws KettleStepException + * if any errors occur during analysis + */ + public void analyseImpact( List impact, ProgressMonitorListener monitor ) throws KettleStepException { + if ( monitor != null ) { + monitor + .beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.DeterminingImpactTask.Title" ), nrSteps() ); + } + boolean stop = false; + for ( int i = 0; i < nrSteps() && !stop; i++ ) { + if ( monitor != null ) { + monitor.subTask( + BaseMessages.getString( PKG, "TransMeta.Monitor.LookingAtStepTask.Title" ) + ( i + 1 ) + "/" + nrSteps() ); + } + StepMeta stepMeta = getStep( i ); + + RowMetaInterface prev = getPrevStepFields( stepMeta ); + StepMetaInterface stepint = stepMeta.getStepMetaInterface(); + RowMetaInterface inform = null; + StepMeta[] lu = getInfoStep( stepMeta ); + if ( lu != null ) { + inform = getStepFields( lu ); + } else { + inform = stepint.getTableFields(); + } + + compatibleAnalyseImpactStep( impact, stepint, this, stepMeta, prev, inform ); + stepint.analyseImpact( impact, this, stepMeta, prev, null, null, inform, repository, metaStore ); + + if ( monitor != null ) { + monitor.worked( 1 ); + stop = monitor.isCanceled(); + } + } + + if ( monitor != null ) { + monitor.done(); + } + } + + @SuppressWarnings( "deprecation" ) + private void compatibleAnalyseImpactStep( List impact, StepMetaInterface stepint, TransMeta transMeta, + StepMeta stepMeta, RowMetaInterface prev, RowMetaInterface inform ) throws KettleStepException { + stepint.analyseImpact( impact, transMeta, stepMeta, prev, null, null, inform ); + } + + /** + * Proposes an alternative stepname when the original already exists. + * + * @param stepname + * The stepname to find an alternative for + * @return The suggested alternative stepname. + */ + public String getAlternativeStepname( String stepname ) { + String newname = stepname; + StepMeta stepMeta = findStep( newname ); + int nr = 1; + while ( stepMeta != null ) { + nr++; + newname = stepname + " " + nr; + stepMeta = findStep( newname ); + } + + return newname; + } + + /** + * Builds a list of all the SQL statements that this transformation needs in order to work properly. + * + * @return An ArrayList of SQLStatement objects. + * @throws KettleStepException + * if any errors occur during SQL statement generation + */ + public List getSQLStatements() throws KettleStepException { + return getSQLStatements( null ); + } + + /** + * Builds a list of all the SQL statements that this transformation needs in order to work properly. 
+ * + * @param monitor + * a progress monitor listener to be updated as the SQL statements are generated + * @return An ArrayList of SQLStatement objects. + * @throws KettleStepException + * if any errors occur during SQL statement generation + */ + public List getSQLStatements( ProgressMonitorListener monitor ) throws KettleStepException { + if ( monitor != null ) { + monitor.beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title" ), nrSteps() + 1 ); + } + List stats = new ArrayList(); + + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + if ( monitor != null ) { + monitor.subTask( + BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForStepTask.Title", "" + stepMeta ) ); + } + RowMetaInterface prev = getPrevStepFields( stepMeta ); + SQLStatement sqlCompat = compatibleStepMetaGetSQLStatements( stepMeta.getStepMetaInterface(), stepMeta, prev ); + if ( sqlCompat.getSQL() != null || sqlCompat.hasError() ) { + stats.add( sqlCompat ); + } + SQLStatement + sql = + stepMeta.getStepMetaInterface().getSQLStatements( this, stepMeta, prev, repository, metaStore ); + if ( sql.getSQL() != null || sql.hasError() ) { + stats.add( sql ); + } + if ( monitor != null ) { + monitor.worked( 1 ); + } + } + + // Also check the sql for the logtable... + // + if ( monitor != null ) { + monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title2" ) ); + } + if ( transLogTable.getDatabaseMeta() != null && ( !Const.isEmpty( transLogTable.getTableName() ) || !Const + .isEmpty( performanceLogTable.getTableName() ) ) ) { + try { + for ( LogTableInterface logTable : new LogTableInterface[] { transLogTable, performanceLogTable, + channelLogTable, stepLogTable, } ) { + if ( logTable.getDatabaseMeta() != null && !Const.isEmpty( logTable.getTableName() ) ) { + + Database db = null; + try { + db = new Database( this, transLogTable.getDatabaseMeta() ); + db.shareVariablesWith( this ); + db.connect(); + + RowMetaInterface fields = logTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); + String + schemaTable = + logTable.getDatabaseMeta() + .getQuotedSchemaTableCombination( logTable.getSchemaName(), logTable.getTableName() ); + String sql = db.getDDL( schemaTable, fields ); + if ( !Const.isEmpty( sql ) ) { + SQLStatement stat = new SQLStatement( "", transLogTable.getDatabaseMeta(), sql ); + stats.add( stat ); + } + } catch ( Exception e ) { + throw new KettleDatabaseException( + "Unable to connect to logging database [" + logTable.getDatabaseMeta() + "]", e ); + } finally { + if ( db != null ) { + db.disconnect(); + } + } + } + } + } catch ( KettleDatabaseException dbe ) { + SQLStatement stat = new SQLStatement( "", transLogTable.getDatabaseMeta(), null ); + stat.setError( + BaseMessages.getString( PKG, "TransMeta.SQLStatement.ErrorDesc.ErrorObtainingTransformationLogTableInfo" ) + + dbe.getMessage() ); + stats.add( stat ); + } + } + if ( monitor != null ) { + monitor.worked( 1 ); + } + if ( monitor != null ) { + monitor.done(); + } + + return stats; + } + + @SuppressWarnings( "deprecation" ) + private SQLStatement compatibleStepMetaGetSQLStatements( StepMetaInterface stepMetaInterface, StepMeta stepMeta, + RowMetaInterface prev ) throws KettleStepException { + return stepMetaInterface.getSQLStatements( this, stepMeta, prev ); + } + + /** + * Get the SQL statements (needed to run this transformation) as a single String. 
+ * + * @return the SQL statements needed to run this transformation + * @throws KettleStepException + * if any errors occur during SQL statement generation + */ + public String getSQLStatementsString() throws KettleStepException { + String sql = ""; + List stats = getSQLStatements(); + for ( int i = 0; i < stats.size(); i++ ) { + SQLStatement stat = stats.get( i ); + if ( !stat.hasError() && stat.hasSQL() ) { + sql += stat.getSQL(); + } + } + + return sql; + } + + /** + * Checks all the steps and fills a List of (CheckResult) remarks. + * + * @param remarks + * The remarks list to add to. + * @param only_selected + * true to check only the selected steps, false for all steps + * @param monitor + * a progress monitor listener to be updated as the SQL statements are generated + */ + @Deprecated + public void checkSteps( List remarks, boolean only_selected, ProgressMonitorListener monitor ) { + checkSteps( remarks, only_selected, monitor, this, null, null ); + } + + /** + * Checks all the steps and fills a List of (CheckResult) remarks. + * + * @param remarks + * The remarks list to add to. + * @param only_selected + * true to check only the selected steps, false for all steps + * @param monitor + * a progress monitor listener to be updated as the SQL statements are generated + */ + public void checkSteps( List remarks, boolean only_selected, ProgressMonitorListener monitor, + VariableSpace space, Repository repository, IMetaStore metaStore ) { + try { + remarks.clear(); // Start with a clean slate... + + Map values = new Hashtable(); + String[] stepnames; + StepMeta[] steps; + List selectedSteps = getSelectedSteps(); + if ( !only_selected || selectedSteps.isEmpty() ) { + stepnames = getStepNames(); + steps = getStepsArray(); + } else { + stepnames = getSelectedStepNames(); + steps = selectedSteps.toArray( new StepMeta[selectedSteps.size()] ); + } + + ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.BeforeCheckSteps.id, + new CheckStepsExtension( remarks, space, this, steps, repository, metaStore ) ); + + boolean stop_checking = false; + + if ( monitor != null ) { + monitor.beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.VerifyingThisTransformationTask.Title" ), + steps.length + 2 ); + } + + for ( int i = 0; i < steps.length && !stop_checking; i++ ) { + if ( monitor != null ) { + monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.VerifyingStepTask.Title", stepnames[i] ) ); + } + + StepMeta stepMeta = steps[i]; + + int nrinfo = findNrInfoSteps( stepMeta ); + StepMeta[] infostep = null; + if ( nrinfo > 0 ) { + infostep = getInfoStep( stepMeta ); + } + + RowMetaInterface info = null; + if ( infostep != null ) { + try { + info = getStepFields( infostep ); + } catch ( KettleStepException kse ) { + info = null; + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, + "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingStepInfoFields.Description", + "" + stepMeta, Const.CR + kse.getMessage() ), stepMeta ); + remarks.add( cr ); + } + } + + // The previous fields from non-informative steps: + RowMetaInterface prev = null; + try { + prev = getPrevStepFields( stepMeta ); + } catch ( KettleStepException kse ) { + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingInputFields.Description", + "" + stepMeta, Const.CR + kse.getMessage() ), stepMeta ); + remarks.add( cr ); + // 
This is a severe error: stop checking... + // Otherwise we wind up checking time & time again because nothing gets put in the database + // cache, the timeout of certain databases is very long... (Oracle) + stop_checking = true; + } + + if ( isStepUsedInTransHops( stepMeta ) ) { + // Get the input & output steps! + // Copy to arrays: + String[] input = getPrevStepNames( stepMeta ); + String[] output = getNextStepNames( stepMeta ); + + // Check step specific info... + ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.BeforeCheckStep.id, + new CheckStepsExtension( remarks, space, this, new StepMeta[] { stepMeta }, repository, metaStore ) ); + stepMeta.check( remarks, this, prev, input, output, info, space, repository, metaStore ); + ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.AfterCheckStep.id, + new CheckStepsExtension( remarks, space, this, new StepMeta[] { stepMeta }, repository, metaStore ) ); + + // See if illegal characters etc. were used in field-names... + if ( prev != null ) { + for ( int x = 0; x < prev.size(); x++ ) { + ValueMetaInterface v = prev.getValueMeta( x ); + String name = v.getName(); + if ( name == null ) { + values.put( v, + BaseMessages.getString( PKG, "TransMeta.Value.CheckingFieldName.FieldNameIsEmpty.Description" ) ); + } else if ( name.indexOf( ' ' ) >= 0 ) { + values.put( v, BaseMessages + .getString( PKG, "TransMeta.Value.CheckingFieldName.FieldNameContainsSpaces.Description" ) ); + } else { + char[] list = + new char[] { '.', ',', '-', '/', '+', '*', '\'', '\t', '"', '|', '@', '(', ')', '{', '}', '!', + '^' }; + for ( int c = 0; c < list.length; c++ ) { + if ( name.indexOf( list[c] ) >= 0 ) { + values.put( v, BaseMessages.getString( PKG, + "TransMeta.Value.CheckingFieldName.FieldNameContainsUnfriendlyCodes.Description", + String.valueOf( list[c] ) ) ); + } + } + } + } + + // Check if 2 steps with the same name are entering the step... + if ( prev.size() > 1 ) { + String[] fieldNames = prev.getFieldNames(); + String[] sortedNames = Const.sortStrings( fieldNames ); + + String prevName = sortedNames[0]; + for ( int x = 1; x < sortedNames.length; x++ ) { + // Checking for doubles + if ( prevName.equalsIgnoreCase( sortedNames[x] ) ) { + // Give a warning!! + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultWarning.HaveTheSameNameField.Description", + prevName ), stepMeta ); + remarks.add( cr ); + } else { + prevName = sortedNames[x]; + } + } + } + } else { + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultError.CannotFindPreviousFields.Description" ) + + stepMeta.getName(), stepMeta ); + remarks.add( cr ); + } + } else { + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, + BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultWarning.StepIsNotUsed.Description" ), + stepMeta ); + remarks.add( cr ); + } + + // Also check for mixing rows... + try { + checkRowMixingStatically( stepMeta, null ); + } catch ( KettleRowException e ) { + CheckResult cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, e.getMessage(), stepMeta ); + remarks.add( cr ); + } + + if ( monitor != null ) { + monitor.worked( 1 ); // progress bar... + if ( monitor.isCanceled() ) { + stop_checking = true; + } + } + } + + // Also, check the logging table of the transformation... 
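+ // Verify that the logging connection works, that the log table exists and that its layout matches the expected log record.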
+ if ( monitor == null || !monitor.isCanceled() ) { + if ( monitor != null ) { + monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingTheLoggingTableTask.Title" ) ); + } + if ( transLogTable.getDatabaseMeta() != null ) { + Database logdb = new Database( this, transLogTable.getDatabaseMeta() ); + logdb.shareVariablesWith( this ); + try { + logdb.connect(); + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, + BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.ConnectingWorks.Description" ), + null ); + remarks.add( cr ); + + if ( transLogTable.getTableName() != null ) { + if ( logdb.checkTableExists( transLogTable.getTableName() ) ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultOK.LoggingTableExists.Description", + transLogTable.getTableName() ), null ); + remarks.add( cr ); + + RowMetaInterface fields = transLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); + String sql = logdb.getDDL( transLogTable.getTableName(), fields ); + if ( sql == null || sql.length() == 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, + BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.CorrectLayout.Description" ), + null ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, + "TransMeta.CheckResult.TypeResultError.LoggingTableNeedsAdjustments.Description" ) + Const.CR + + sql, null ); + remarks.add( cr ); + } + + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultError.LoggingTableDoesNotExist.Description" ), + null ); + remarks.add( cr ); + } + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultError.LogTableNotSpecified.Description" ), + null ); + remarks.add( cr ); + } + } catch ( KettleDatabaseException dbe ) { + // Ignore errors + } finally { + logdb.disconnect(); + } + } + if ( monitor != null ) { + monitor.worked( 1 ); + } + + } + + if ( monitor != null ) { + monitor.subTask( BaseMessages + .getString( PKG, "TransMeta.Monitor.CheckingForDatabaseUnfriendlyCharactersInFieldNamesTask.Title" ) ); + } + if ( values.size() > 0 ) { + for ( ValueMetaInterface v : values.keySet() ) { + String message = values.get( v ); + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages + .getString( PKG, "TransMeta.CheckResult.TypeResultWarning.Description", v.getName(), message, + v.getOrigin() ), findStep( v.getOrigin() ) ); + remarks.add( cr ); + } + } else { + CheckResult + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, + BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.Description" ), null ); + remarks.add( cr ); + } + if ( monitor != null ) { + monitor.worked( 1 ); + } + ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.AfterCheckSteps.id, + new CheckStepsExtension( remarks, space, this, steps, repository, metaStore ) ); + } catch ( Exception e ) { + log.logError( Const.getStackTracker( e ) ); + throw new RuntimeException( e ); + } + + } + + /** + * Gets the result rows. + * + * @return a list containing the result rows. 
+ * @deprecated Moved to Trans to make this class stateless + */ + @Deprecated + public List getResultRows() { + return resultRows; + } + + /** + * Sets the list of result rows. + * + * @param resultRows + * The list of result rows to set. + * @deprecated Moved to Trans to make this class stateless + */ + @Deprecated + public void setResultRows( List resultRows ) { + this.resultRows = resultRows; + } + + /** + * Gets the repository directory path and name of the transformation. + * + * @return The repository directory path plus the name of the transformation + */ + public String getPathAndName() { + if ( getRepositoryDirectory().isRoot() ) { + return getRepositoryDirectory().getPath() + getName(); + } else { + return getRepositoryDirectory().getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + getName(); + } + } + + /** + * Gets the arguments used for this transformation. + * + * @return an array of String arguments for the transformation + * @deprecated moved to Trans + */ + @Deprecated + public String[] getArguments() { + return arguments; + } + + /** + * Sets the arguments used for this transformation. + * + * @param arguments + * The arguments to set. + * @deprecated moved to Trans + */ + @Deprecated + public void setArguments( String[] arguments ) { + this.arguments = arguments; + } + + /** + * Gets the counters (database sequence values, e.g.) for the transformation. + * + * @return a named table of counters. + * @deprecated moved to Trans + */ + @Deprecated + public Hashtable getCounters() { + return counters; + } + + /** + * Sets the counters (database sequence values, e.g.) for the transformation. + * + * @param counters + * The counters to set. + * @deprecated moved to Trans + */ + @Deprecated + public void setCounters( Hashtable counters ) { + this.counters = counters; + } + + /** + * Gets a list of dependencies for the transformation + * + * @return a list of the dependencies for the transformation + */ + public List getDependencies() { + return dependencies; + } + + /** + * Sets the dependencies for the transformation. + * + * @param dependencies + * The dependency list to set. + */ + public void setDependencies( List dependencies ) { + this.dependencies = dependencies; + } + + /** + * Gets the database connection associated with "max date" processing. The connection, along with a specified table + * and field, allows for the filtering of the number of rows to process in a transformation by time, such as only + * processing the rows/records since the last time the transformation ran correctly. This can be used for auditing and + * throttling data during warehousing operations. + * + * @return Returns the meta-data associated with the most recent database connection. + */ + public DatabaseMeta getMaxDateConnection() { + return maxDateConnection; + } + + /** + * Sets the database connection associated with "max date" processing. + * + * @param maxDateConnection + * the database meta-data to set + * @see #getMaxDateConnection() + */ + public void setMaxDateConnection( DatabaseMeta maxDateConnection ) { + this.maxDateConnection = maxDateConnection; + } + + /** + * Gets the maximum date difference between start and end dates for row/record processing. This can be used for + * auditing and throttling data during warehousing operations. + * + * @return the maximum date difference + */ + public double getMaxDateDifference() { + return maxDateDifference; + } + + /** + * Sets the maximum date difference between start and end dates for row/record processing. 
+ * + * @param maxDateDifference + * The date difference to set. + * @see #getMaxDateDifference() + */ + public void setMaxDateDifference( double maxDateDifference ) { + this.maxDateDifference = maxDateDifference; + } + + /** + * Gets the date field associated with "max date" processing. This allows for the filtering of the number of rows to + * process in a transformation by time, such as only processing the rows/records since the last time the + * transformation ran correctly. This can be used for auditing and throttling data during warehousing operations. + * + * @return a string representing the date for the most recent database connection. + * @see #getMaxDateConnection() + */ + public String getMaxDateField() { + return maxDateField; + } + + /** + * Sets the date field associated with "max date" processing. + * + * @param maxDateField + * The date field to set. + * @see #getMaxDateField() + */ + public void setMaxDateField( String maxDateField ) { + this.maxDateField = maxDateField; + } + + /** + * Gets the amount by which to increase the "max date" difference. This is used in "max date" processing, and can be + * used to provide more fine-grained control of the date range. For example, if the end date specifies a minute for + * which the data is not complete, you can "roll-back" the end date by one minute by + * + * @return Returns the maxDateOffset. + * @see #setMaxDateOffset(double) + */ + public double getMaxDateOffset() { + return maxDateOffset; + } + + /** + * Sets the amount by which to increase the end date in "max date" processing. This can be used to provide more + * fine-grained control of the date range. For example, if the end date specifies a minute for which the data is not + * complete, you can "roll-back" the end date by one minute by setting the offset to -60. + * + * @param maxDateOffset + * The maxDateOffset to set. + */ + public void setMaxDateOffset( double maxDateOffset ) { + this.maxDateOffset = maxDateOffset; + } + + /** + * Gets the database table providing a date to be used in "max date" processing. This allows for the filtering of the + * number of rows to process in a transformation by time, such as only processing the rows/records since the last time + * the transformation ran correctly. + * + * @return Returns the maxDateTable. + * @see #getMaxDateConnection() + */ + public String getMaxDateTable() { + return maxDateTable; + } + + /** + * Sets the table name associated with "max date" processing. + * + * @param maxDateTable + * The maxDateTable to set. + * @see #getMaxDateTable() + */ + public void setMaxDateTable( String maxDateTable ) { + this.maxDateTable = maxDateTable; + } + + /** + * Gets the size of the rowsets. + * + * @return Returns the size of the rowsets. + */ + public int getSizeRowset() { + String rowSetSize = getVariable( Const.KETTLE_TRANS_ROWSET_SIZE ); + int altSize = Const.toInt( rowSetSize, 0 ); + if ( altSize > 0 ) { + return altSize; + } else { + return sizeRowset; + } + } + + /** + * Sets the size of the rowsets. This method allows you to change the size of the buffers between the connected steps + * in a transformation. NOTE: Do not change this parameter unless you are running low on memory, for example. + * + * @param sizeRowset + * The sizeRowset to set. + */ + public void setSizeRowset( int sizeRowset ) { + this.sizeRowset = sizeRowset; + } + + /** + * Gets the database cache object. + * + * @return the database cache object. + */ + public DBCache getDbCache() { + return dbCache; + } + + /** + * Sets the database cache object. 
+ * + * @param dbCache + * the database cache object to set + */ + public void setDbCache( DBCache dbCache ) { + this.dbCache = dbCache; + } + + /** + * Gets the version of the transformation. + * + * @return The version of the transformation + */ + public String getTransversion() { + return trans_version; + } + + /** + * Sets the version of the transformation. + * + * @param n + * The new version description of the transformation + */ + public void setTransversion( String n ) { + trans_version = n; + } + + /** + * Sets the status of the transformation. + * + * @param n + * The new status description of the transformation + */ + public void setTransstatus( int n ) { + trans_status = n; + } + + /** + * Gets the status of the transformation. + * + * @return The status of the transformation + */ + public int getTransstatus() { + return trans_status; + } + + /** + * Gets a textual representation of the transformation. If its name has been set, it will be returned, otherwise the + * classname is returned. + * + * @return the textual representation of the transformation. + */ + @Override + public String toString() { + if ( !Const.isEmpty( filename ) ) { + if ( Const.isEmpty( name ) ) { + return filename; + } else { + return filename + " : " + name; + } + } + + if ( name != null ) { + if ( directory != null ) { + String path = directory.getPath(); + if ( path.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { + return path + name; + } else { + return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name; + } + } else { + return name; + } + } else { + return TransMeta.class.getName(); + } + } + + /** + * Cancels queries opened for checking & fieldprediction. + * + * @throws KettleDatabaseException + * if any errors occur during query cancellation + */ + public void cancelQueries() throws KettleDatabaseException { + for ( int i = 0; i < nrSteps(); i++ ) { + getStep( i ).getStepMetaInterface().cancelQueries(); + } + } + + /** + * Gets the arguments (and their values) used by this transformation. If argument values are supplied by parameter, + * the values will used for the arguments. If the values are null or empty, the method will attempt to use argument + * values from a previous execution. + * + * @param arguments + * the values for the arguments + * @return A row with the used arguments (and their values) in it. + */ + public Map getUsedArguments( String[] arguments ) { + Map transArgs = new HashMap(); + + for ( int i = 0; i < nrSteps(); i++ ) { + StepMetaInterface smi = getStep( i ).getStepMetaInterface(); + Map stepArgs = smi.getUsedArguments(); // Get the command line arguments that this step uses. + if ( stepArgs != null ) { + transArgs.putAll( stepArgs ); + } + } + + // OK, so perhaps, we can use the arguments from a previous execution? + String[] saved = Props.isInitialized() ? Props.getInstance().getLastArguments() : null; + + // Set the default values on it... + // Also change the name to "Argument 1" .. "Argument 10" + // + for ( String argument : transArgs.keySet() ) { + String value = ""; + int argNr = Const.toInt( argument, -1 ); + if ( arguments != null && argNr > 0 && argNr <= arguments.length ) { + value = Const.NVL( arguments[argNr - 1], "" ); + } + if ( value.length() == 0 ) { // try the saved option... + + if ( argNr > 0 && argNr < saved.length && saved[argNr] != null ) { + value = saved[argNr - 1]; + } + } + transArgs.put( argument, value ); + } + + return transArgs; + } + + /** + * Gets the amount of time (in nano-seconds) to wait while the input buffer is empty. 
+ * + * @return the number of nano-seconds to wait while the input buffer is empty. + */ + public int getSleepTimeEmpty() { + return sleepTimeEmpty; + } + + /** + * Gets the amount of time (in nano-seconds) to wait while the input buffer is full. + * + * @return the number of nano-seconds to wait while the input buffer is full. + */ + public int getSleepTimeFull() { + return sleepTimeFull; + } + + /** + * Sets the amount of time (in nano-seconds) to wait while the input buffer is empty. + * + * @param sleepTimeEmpty + * the number of nano-seconds to wait while the input buffer is empty. + */ + public void setSleepTimeEmpty( int sleepTimeEmpty ) { + this.sleepTimeEmpty = sleepTimeEmpty; + } + + /** + * Sets the amount of time (in nano-seconds) to wait while the input buffer is full. + * + * @param sleepTimeFull + * the number of nano-seconds to wait while the input buffer is full. + */ + public void setSleepTimeFull( int sleepTimeFull ) { + this.sleepTimeFull = sleepTimeFull; + } + + /** + * This method asks all steps in the transformation whether or not the specified database connection is used. The + * connection is used in the transformation if any of the steps uses it or if it is being used to log to. + * + * @param databaseMeta + * The connection to check + * @return true if the connection is used in this transformation. + */ + public boolean isDatabaseConnectionUsed( DatabaseMeta databaseMeta ) { + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + DatabaseMeta[] dbs = stepMeta.getStepMetaInterface().getUsedDatabaseConnections(); + for ( int d = 0; d < dbs.length; d++ ) { + if ( dbs[d].equals( databaseMeta ) ) { + return true; + } + } + } + + if ( transLogTable.getDatabaseMeta() != null && transLogTable.getDatabaseMeta().equals( databaseMeta ) ) { + return true; + } + + return false; + } + + /* + * public List getInputFiles() { return inputFiles; } + * + * public void setInputFiles(List inputFiles) { this.inputFiles = inputFiles; } + */ + + /** + * Gets a list of all the strings used in this transformation. The parameters indicate which collections to search and + * which to exclude. + * + * @param searchSteps + * true if steps should be searched, false otherwise + * @param searchDatabases + * true if databases should be searched, false otherwise + * @param searchNotes + * true if notes should be searched, false otherwise + * @param includePasswords + * true if passwords should be searched, false otherwise + * @return a list of search results for strings used in the transformation. + */ + public List getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes, + boolean includePasswords ) { + List stringList = new ArrayList(); + + if ( searchSteps ) { + // Loop over all steps in the transformation and see what the used vars are... + for ( int i = 0; i < nrSteps(); i++ ) { + StepMeta stepMeta = getStep( i ); + stringList.add( new StringSearchResult( stepMeta.getName(), stepMeta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.StepName" ) ) ); + if ( stepMeta.getDescription() != null ) { + stringList.add( new StringSearchResult( stepMeta.getDescription(), stepMeta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.StepDescription" ) ) ); + } + StepMetaInterface metaInterface = stepMeta.getStepMetaInterface(); + StringSearcher.findMetaData( metaInterface, 1, stringList, stepMeta, this ); + } + } + + // Loop over all steps in the transformation and see what the used vars are... 
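+ // Collect the searchable strings of each database connection: name, host name, database name, username, connection type, port, server and, when requested, password.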
+ if ( searchDatabases ) { + for ( int i = 0; i < nrDatabases(); i++ ) { + DatabaseMeta meta = getDatabase( i ); + stringList.add( new StringSearchResult( meta.getName(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseConnectionName" ) ) ); + if ( meta.getHostname() != null ) { + stringList.add( new StringSearchResult( meta.getHostname(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseHostName" ) ) ); + } + if ( meta.getDatabaseName() != null ) { + stringList.add( new StringSearchResult( meta.getDatabaseName(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseName" ) ) ); + } + if ( meta.getUsername() != null ) { + stringList.add( new StringSearchResult( meta.getUsername(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseUsername" ) ) ); + } + if ( meta.getPluginId() != null ) { + stringList.add( new StringSearchResult( meta.getPluginId(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseTypeDescription" ) ) ); + } + if ( meta.getDatabasePortNumberString() != null ) { + stringList.add( new StringSearchResult( meta.getDatabasePortNumberString(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabasePort" ) ) ); + } + if ( meta.getServername() != null ) { + stringList.add( new StringSearchResult( meta.getServername(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseServer" ) ) ); + } + if ( includePasswords ) { + if ( meta.getPassword() != null ) { + stringList.add( new StringSearchResult( meta.getPassword(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabasePassword" ) ) ); + } + } + } + } + + // Loop over all steps in the transformation and see what the used vars are... + if ( searchNotes ) { + for ( int i = 0; i < nrNotes(); i++ ) { + NotePadMeta meta = getNote( i ); + if ( meta.getNote() != null ) { + stringList.add( new StringSearchResult( meta.getNote(), meta, this, + BaseMessages.getString( PKG, "TransMeta.SearchMetadata.NotepadText" ) ) ); + } + } + } + + return stringList; + } + + /** + * Get a list of all the strings used in this transformation. The parameters indicate which collections to search and + * which to exclude. + * + * @param searchSteps + * true if steps should be searched, false otherwise + * @param searchDatabases + * true if databases should be searched, false otherwise + * @param searchNotes + * true if notes should be searched, false otherwise + * @return a list of search results for strings used in the transformation. + */ + public List getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes ) { + return getStringList( searchSteps, searchDatabases, searchNotes, false ); + } + + /** + * Gets a list of the used variables in this transformation. + * + * @return a list of the used variables in this transformation. + */ + public List getUsedVariables() { + // Get the list of Strings. + List stringList = getStringList( true, true, false, true ); + + List varList = new ArrayList(); + + // Look around in the strings, see what we find... + for ( int i = 0; i < stringList.size(); i++ ) { + StringSearchResult result = stringList.get( i ); + StringUtil.getUsedVariables( result.getString(), varList, false ); + } + + return varList; + } + + /** + * Gets the previous result. + * + * @return the previous Result. 
+ * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public Result getPreviousResult() { + return previousResult; + } + + /** + * Sets the previous result. + * + * @param previousResult + * The previous Result to set. + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public void setPreviousResult( Result previousResult ) { + this.previousResult = previousResult; + } + + /** + * Gets a list of the files in the result. + * + * @return a list of ResultFiles. + * + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public List getResultFiles() { + return resultFiles; + } + + /** + * Sets the list of the files in the result. + * + * @param resultFiles + * The list of ResultFiles to set. + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public void setResultFiles( List resultFiles ) { + this.resultFiles = resultFiles; + } + + /** + * Gets a list of partition schemas for this transformation. + * + * @return a list of PartitionSchemas + */ + public List getPartitionSchemas() { + return partitionSchemas; + } + + /** + * Sets the list of partition schemas for this transformation. + * + * @param partitionSchemas + * the list of PartitionSchemas to set + */ + public void setPartitionSchemas( List partitionSchemas ) { + this.partitionSchemas = partitionSchemas; + } + + /** + * Gets the partition schemas' names. + * + * @return a String array containing the available partition schema names. + */ + public String[] getPartitionSchemasNames() { + String[] names = new String[partitionSchemas.size()]; + for ( int i = 0; i < names.length; i++ ) { + names[i] = partitionSchemas.get( i ).getName(); + } + return names; + } + + /** + * Checks if is feedback shown. + * + * @return true if feedback is shown, false otherwise + */ + public boolean isFeedbackShown() { + return feedbackShown; + } + + /** + * Sets whether the feedback should be shown. + * + * @param feedbackShown + * true if feedback should be shown, false otherwise + */ + public void setFeedbackShown( boolean feedbackShown ) { + this.feedbackShown = feedbackShown; + } + + /** + * Gets the feedback size. + * + * @return the feedback size + */ + public int getFeedbackSize() { + return feedbackSize; + } + + /** + * Sets the feedback size. + * + * @param feedbackSize + * the feedback size to set + */ + public void setFeedbackSize( int feedbackSize ) { + this.feedbackSize = feedbackSize; + } + + /** + * Checks if the transformation is using unique database connections. + * + * @return true if the transformation is using unique database connections, false otherwise + */ + public boolean isUsingUniqueConnections() { + return usingUniqueConnections; + } + + /** + * Sets whether the transformation is using unique database connections. + * + * @param usingUniqueConnections + * true if the transformation is using unique database connections, false otherwise + */ + public void setUsingUniqueConnections( boolean usingUniqueConnections ) { + this.usingUniqueConnections = usingUniqueConnections; + } + + /** + * Gets a list of the cluster schemas used by the transformation. + * + * @return a list of ClusterSchemas + */ + public List getClusterSchemas() { + return clusterSchemas; + } + + /** + * Sets list of the cluster schemas used by the transformation. 
+ * + * @param clusterSchemas + * the list of ClusterSchemas to set + */ + public void setClusterSchemas( List clusterSchemas ) { + this.clusterSchemas = clusterSchemas; + } + + /** + * Gets the cluster schema names. + * + * @return a String array containing the cluster schemas' names + */ + public String[] getClusterSchemaNames() { + String[] names = new String[clusterSchemas.size()]; + for ( int i = 0; i < names.length; i++ ) { + names[i] = clusterSchemas.get( i ).getName(); + } + return names; + } + + /** + * Find a partition schema using its name. + * + * @param name + * The name of the partition schema to look for. + * @return the partition with the specified name of null if nothing was found + */ + public PartitionSchema findPartitionSchema( String name ) { + for ( int i = 0; i < partitionSchemas.size(); i++ ) { + PartitionSchema schema = partitionSchemas.get( i ); + if ( schema.getName().equalsIgnoreCase( name ) ) { + return schema; + } + } + return null; + } + + /** + * Find a clustering schema using its name. + * + * @param name + * The name of the clustering schema to look for. + * @return the cluster schema with the specified name of null if nothing was found + */ + public ClusterSchema findClusterSchema( String name ) { + for ( int i = 0; i < clusterSchemas.size(); i++ ) { + ClusterSchema schema = clusterSchemas.get( i ); + if ( schema.getName().equalsIgnoreCase( name ) ) { + return schema; + } + } + return null; + } + + /** + * Add a new partition schema to the transformation if that didn't exist yet. Otherwise, replace it. + * + * @param partitionSchema + * The partition schema to be added. + */ + public void addOrReplacePartitionSchema( PartitionSchema partitionSchema ) { + int index = partitionSchemas.indexOf( partitionSchema ); + if ( index < 0 ) { + partitionSchemas.add( partitionSchema ); + } else { + PartitionSchema previous = partitionSchemas.get( index ); + previous.replaceMeta( partitionSchema ); + } + setChanged(); + } + + /** + * Add a new cluster schema to the transformation if that didn't exist yet. Otherwise, replace it. + * + * @param clusterSchema + * The cluster schema to be added. + */ + public void addOrReplaceClusterSchema( ClusterSchema clusterSchema ) { + int index = clusterSchemas.indexOf( clusterSchema ); + if ( index < 0 ) { + clusterSchemas.add( clusterSchema ); + } else { + ClusterSchema previous = clusterSchemas.get( index ); + previous.replaceMeta( clusterSchema ); + } + setChanged(); + } + + /** + * Save shared objects, including databases, steps, partition schemas, slave servers, and cluster schemas, to a file + * + * @throws KettleException + * the kettle exception + * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() + * @see org.pentaho.di.shared.SharedObjects#saveToFile() + */ + public void saveSharedObjects() throws KettleException { + try { + // Save the meta store shared objects... + // + saveMetaStoreObjects( repository, metaStore ); + + // Load all the shared objects... + String soFile = environmentSubstitute( sharedObjectsFile ); + SharedObjects sharedObjects = new SharedObjects( soFile ); + + // Now overwrite the objects in there + List shared = new ArrayList(); + shared.addAll( databases ); + shared.addAll( steps ); + shared.addAll( partitionSchemas ); + shared.addAll( slaveServers ); + shared.addAll( clusterSchemas ); + + // The databases connections... 
+      for ( SharedObjectInterface sharedObject : shared ) {
+        if ( sharedObject.isShared() ) {
+          sharedObjects.storeObject( sharedObject );
+        }
+      }
+
+      // Save the objects
+      sharedObjects.saveToFile();
+    } catch ( Exception e ) {
+      throw new KettleException( "Unable to save shared objects", e );
+    }
+  }
+
+  /**
+   * Checks whether the transformation is using thread priority management.
+   *
+   * @return true if the transformation is using thread priority management, false otherwise
+   */
+  public boolean isUsingThreadPriorityManagment() {
+    return usingThreadPriorityManagment;
+  }
+
+  /**
+   * Sets whether the transformation is using thread priority management.
+   *
+   * @param usingThreadPriorityManagment
+   *          true if the transformation is using thread priority management, false otherwise
+   */
+  public void setUsingThreadPriorityManagment( boolean usingThreadPriorityManagment ) {
+    this.usingThreadPriorityManagment = usingThreadPriorityManagment;
+  }
+
+  /**
+   * Checks whether a step reads from multiple previous steps and, if so, verifies that the incoming rows all have
+   * the same layout. Only the DBCache is used for this check, to prevent GUI stalls.
+   *
+   * @param stepMeta
+   *          the step to check
+   * @param monitor
+   *          the monitor
+   * @throws KettleRowException
+   *           in case we detect a row mixing violation
+   */
+  public void checkRowMixingStatically( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleRowException {
+    int nrPrevious = findNrPrevSteps( stepMeta );
+    if ( nrPrevious > 1 ) {
+      RowMetaInterface referenceRow = null;
+      // See if all previous steps send out the same rows...
+      for ( int i = 0; i < nrPrevious; i++ ) {
+        StepMeta previousStep = findPrevStep( stepMeta, i );
+        try {
+          RowMetaInterface row = getStepFields( previousStep, monitor ); // Throws KettleStepException
+          if ( referenceRow == null ) {
+            referenceRow = row;
+          } else if ( !stepMeta.getStepMetaInterface().excludeFromRowLayoutVerification() ) {
+            BaseStep.safeModeChecking( referenceRow, row );
+          }
+        } catch ( KettleStepException e ) {
+          // We ignore this one because we are in the process of designing the transformation, anything intermediate
+          // can go wrong.
+        }
+      }
+    }
+  }
+
+  /**
+   * Sets the internal kettle variables.
+   *
+   * @param var
+   *          the new internal kettle variables
+   */
+  public void setInternalKettleVariables( VariableSpace var ) {
+    setInternalFilenameKettleVariables( var );
+    setInternalNameKettleVariable( var );
+
+    // The name of the directory in the repository
+    //
+    variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY,
+      directory != null ? directory.getPath() : "" );
+
+    boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null;
+
+    if ( hasRepoDir ) {
+      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY,
+        variables.getVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) );
+    } else {
+      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY,
+        variables.getVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) );
+    }
+
+    // Here we don't remove the job specific parameters, as they may come in handy.
+ // + if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) == null ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "Parent Job File Directory" ); + } + if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME ) == null ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "Parent Job Filename" ); + } + if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_NAME ) == null ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, "Parent Job Name" ); + } + if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) == null ) { + variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "Parent Job Repository Directory" ); + } + + variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, + variables.getVariable( repository != null ? Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY + : Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); + } + + /** + * Sets the internal name kettle variable. + * + * @param var + * the new internal name kettle variable + */ + protected void setInternalNameKettleVariable( VariableSpace var ) { + // The name of the transformation + // + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( name, "" ) ); + } + + /** + * Sets the internal filename kettle variables. + * + * @param var + * the new internal filename kettle variables + */ + protected void setInternalFilenameKettleVariables( VariableSpace var ) { + // If we have a filename that's defined, set variables. If not, clear them. + // + if ( !Const.isEmpty( filename ) ) { + try { + FileObject fileObject = KettleVFS.getFileObject( filename, var ); + FileName fileName = fileObject.getName(); + + // The filename of the transformation + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() ); + + // The directory of the transformation + FileName fileDir = fileName.getParent(); + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() ); + } catch ( KettleFileException e ) { + log.logError( "Unexpected error setting internal filename variables!", e ); + + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); + } + } else { + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); + variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); + } + + } + + /** + * Finds the mapping input step with the specified name. If no mapping input step is found, null is returned + * + * @param stepname + * the name to search for + * @return the step meta-data corresponding to the desired mapping input step, or null if no step was found + * @throws KettleStepException + * if any errors occur during the search + */ + public StepMeta findMappingInputStep( String stepname ) throws KettleStepException { + if ( !Const.isEmpty( stepname ) ) { + StepMeta stepMeta = findStep( stepname ); // TODO verify that it's a mapping input!! + if ( stepMeta == null ) { + throw new KettleStepException( BaseMessages.getString( + PKG, "TransMeta.Exception.StepNameNotFound", stepname ) ); + } + return stepMeta; + } else { + // Find the first mapping input step that fits the bill. 
+      StepMeta stepMeta = null;
+      for ( StepMeta mappingStep : steps ) {
+        if ( mappingStep.getStepID().equals( "MappingInput" ) ) {
+          if ( stepMeta == null ) {
+            stepMeta = mappingStep;
+          } else if ( stepMeta != null ) {
+            throw new KettleStepException( BaseMessages.getString(
+              PKG, "TransMeta.Exception.OnlyOneMappingInputStepAllowed", "2" ) );
+          }
+        }
+      }
+      if ( stepMeta == null ) {
+        throw new KettleStepException( BaseMessages.getString(
+          PKG, "TransMeta.Exception.OneMappingInputStepRequired" ) );
+      }
+      return stepMeta;
+    }
+  }
+
+  /**
+   * Finds the mapping output step with the specified name. If no mapping output step is found, null is returned.
+   *
+   * @param stepname
+   *          the name to search for
+   * @return the step meta-data corresponding to the desired mapping output step, or null if no step was found
+   * @throws KettleStepException
+   *           if any errors occur during the search
+   */
+  public StepMeta findMappingOutputStep( String stepname ) throws KettleStepException {
+    if ( !Const.isEmpty( stepname ) ) {
+      StepMeta stepMeta = findStep( stepname ); // TODO verify that it's a mapping output step.
+      if ( stepMeta == null ) {
+        throw new KettleStepException( BaseMessages.getString(
+          PKG, "TransMeta.Exception.StepNameNotFound", stepname ) );
+      }
+      return stepMeta;
+    } else {
+      // Find the first mapping output step that fits the bill.
+      StepMeta stepMeta = null;
+      for ( StepMeta mappingStep : steps ) {
+        if ( mappingStep.getStepID().equals( "MappingOutput" ) ) {
+          if ( stepMeta == null ) {
+            stepMeta = mappingStep;
+          } else if ( stepMeta != null ) {
+            throw new KettleStepException( BaseMessages.getString(
+              PKG, "TransMeta.Exception.OnlyOneMappingOutputStepAllowed", "2" ) );
+          }
+        }
+      }
+      if ( stepMeta == null ) {
+        throw new KettleStepException( BaseMessages.getString(
+          PKG, "TransMeta.Exception.OneMappingOutputStepRequired" ) );
+      }
+      return stepMeta;
+    }
+  }
+
+  /**
+   * Gets a list of the resource dependencies.
+   *
+   * @return a list of ResourceReferences
+   */
+  public List getResourceDependencies() {
+    List resourceReferences = new ArrayList();
+
+    for ( StepMeta stepMeta : steps ) {
+      resourceReferences.addAll( stepMeta.getResourceDependencies( this ) );
+    }
+
+    return resourceReferences;
+  }
+
+  /**
+   * Exports the specified objects to a flat-file system, adding content with filename keys to a set of definitions.
+   * The supplied resource naming interface allows the object to be named appropriately without exposing
+   * implementation-specific details.
+   *
+   * @param space
+   *          the variable space to use
+   * @param definitions
+   *          the map of filename keys to resource definitions, updated by this call
+   * @param resourceNamingInterface
+   *          the resource naming interface to use
+   * @param repository
+   *          The repository to optionally load other resources from (to be converted to XML)
+   * @param metaStore
+   *          the metaStore in which non-kettle metadata could reside.
+   *
+   * @return the filename of the exported resource
+   */
+  public String exportResources( VariableSpace space, Map definitions,
+    ResourceNamingInterface resourceNamingInterface, Repository repository, IMetaStore metaStore ) throws KettleException {
+
+    try {
+      // Handle naming for both repository and XML based resources...
+      //
+      String baseName;
+      String originalPath;
+      String fullname;
+      String extension = "ktr";
+      if ( Const.isEmpty( getFilename() ) ) {
+        // Assume repository...
+        //
+        originalPath = directory.getPath();
+        baseName = getName();
+        fullname =
+          directory.getPath()
+            + ( directory.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR )
+            ?
"" : RepositoryDirectory.DIRECTORY_SEPARATOR ) + getName() + "." + extension; // + } else { + // Assume file + // + FileObject fileObject = KettleVFS.getFileObject( space.environmentSubstitute( getFilename() ), space ); + originalPath = fileObject.getParent().getURL().toString(); + baseName = fileObject.getName().getBaseName(); + fullname = fileObject.getURL().toString(); + } + + String + exportFileName = + resourceNamingInterface + .nameResource( baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.TRANSFORMATION ); + ResourceDefinition definition = definitions.get( exportFileName ); + if ( definition == null ) { + // If we do this once, it will be plenty :-) + // + TransMeta transMeta = (TransMeta) this.realClone( false ); + // transMeta.copyVariablesFrom(space); + + // Add used resources, modify transMeta accordingly + // Go through the list of steps, etc. + // These critters change the steps in the cloned TransMeta + // At the end we make a new XML version of it in "exported" + // format... + + // loop over steps, databases will be exported to XML anyway. + // + for ( StepMeta stepMeta : transMeta.getSteps() ) { + stepMeta.exportResources( space, definitions, resourceNamingInterface, repository, metaStore ); + } + + // Change the filename, calling this sets internal variables + // inside of the transformation. + // + transMeta.setFilename( exportFileName ); + + // All objects get re-located to the root folder + // + transMeta.setRepositoryDirectory( new RepositoryDirectory() ); + + // Set a number of parameters for all the data files referenced so far... + // + Map directoryMap = resourceNamingInterface.getDirectoryMap(); + if ( directoryMap != null ) { + for ( String directory : directoryMap.keySet() ) { + String parameterName = directoryMap.get( directory ); + transMeta.addParameterDefinition( parameterName, directory, "Data file path discovered during export" ); + } + } + + // At the end, add ourselves to the map... + // + String transMetaContent = transMeta.getXML(); + + definition = new ResourceDefinition( exportFileName, transMetaContent ); + + // Also remember the original filename (if any), including variables etc. + // + if ( Const.isEmpty( this.getFilename() ) ) { // Repository + definition.setOrigin( fullname ); + } else { + definition.setOrigin( this.getFilename() ); + } + + definitions.put( fullname, definition ); + } + return exportFileName; + } catch ( FileSystemException e ) { + throw new KettleException( BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename() ), e ); + } catch ( KettleFileException e ) { + throw new KettleException( BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename() ), e ); + } + } + + /** + * Gets the slave step copy partition distribution. + * + * @return the SlaveStepCopyPartitionDistribution + */ + public SlaveStepCopyPartitionDistribution getSlaveStepCopyPartitionDistribution() { + return slaveStepCopyPartitionDistribution; + } + + /** + * Sets the slave step copy partition distribution. + * + * @param slaveStepCopyPartitionDistribution + * the slaveStepCopyPartitionDistribution to set + */ + public void setSlaveStepCopyPartitionDistribution( + SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution ) { + this.slaveStepCopyPartitionDistribution = slaveStepCopyPartitionDistribution; + } + + /** + * Finds the first used cluster schema. 
+ * + * @return the first used cluster schema + */ + public ClusterSchema findFirstUsedClusterSchema() { + for ( StepMeta stepMeta : steps ) { + if ( stepMeta.getClusterSchema() != null ) { + return stepMeta.getClusterSchema(); + } + } + return null; + } + + /** + * Checks whether the transformation is a slave transformation. + * + * @return true if the transformation is a slave transformation, false otherwise + */ + public boolean isSlaveTransformation() { + return slaveTransformation; + } + + /** + * Sets whether the transformation is a slave transformation. + * + * @param slaveTransformation + * true if the transformation is a slave transformation, false otherwise + */ + public void setSlaveTransformation( boolean slaveTransformation ) { + this.slaveTransformation = slaveTransformation; + } + + /** + * Checks whether the transformation is capturing step performance snapshots. + * + * @return true if the transformation is capturing step performance snapshots, false otherwise + */ + public boolean isCapturingStepPerformanceSnapShots() { + return capturingStepPerformanceSnapShots; + } + + /** + * Sets whether the transformation is capturing step performance snapshots. + * + * @param capturingStepPerformanceSnapShots + * true if the transformation is capturing step performance snapshots, false otherwise + */ + public void setCapturingStepPerformanceSnapShots( boolean capturingStepPerformanceSnapShots ) { + this.capturingStepPerformanceSnapShots = capturingStepPerformanceSnapShots; + } + + /** + * Gets the step performance capturing delay. + * + * @return the step performance capturing delay + */ + public long getStepPerformanceCapturingDelay() { + return stepPerformanceCapturingDelay; + } + + /** + * Sets the step performance capturing delay. + * + * @param stepPerformanceCapturingDelay + * the stepPerformanceCapturingDelay to set + */ + public void setStepPerformanceCapturingDelay( long stepPerformanceCapturingDelay ) { + this.stepPerformanceCapturingDelay = stepPerformanceCapturingDelay; + } + + /** + * Gets the step performance capturing size limit. + * + * @return the step performance capturing size limit + */ + public String getStepPerformanceCapturingSizeLimit() { + return stepPerformanceCapturingSizeLimit; + } + + /** + * Sets the step performance capturing size limit. + * + * @param stepPerformanceCapturingSizeLimit + * the step performance capturing size limit to set + */ + public void setStepPerformanceCapturingSizeLimit( String stepPerformanceCapturingSizeLimit ) { + this.stepPerformanceCapturingSizeLimit = stepPerformanceCapturingSizeLimit; + } + + /** + * Clears the step fields and loop caches. + */ + public void clearCaches() { + clearStepFieldsCachce(); + clearLoopCache(); + } + + /** + * Clears the step fields cachce. + */ + private void clearStepFieldsCachce() { + stepsFieldsCache.clear(); + } + + /** + * Clears the loop cache. + */ + private void clearLoopCache() { + loopCache.clear(); + } + + /** + * Gets the repository element type. + * + * @return the repository element type + * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() + */ + public RepositoryObjectType getRepositoryElementType() { + return REPOSITORY_ELEMENT_TYPE; + } + + /** + * Gets the log channel. + * + * @return the log channel + */ + public LogChannelInterface getLogChannel() { + return log; + } + + /** + * Gets the log channel ID. 
+ * + * @return the log channel ID + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() + */ + public String getLogChannelId() { + return log.getLogChannelId(); + } + + /** + * Gets the object type. + * + * @return the object type + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.TRANSMETA; + } + + /** + * Gets the log table for the transformation. + * + * @return the log table for the transformation + */ + public TransLogTable getTransLogTable() { + return transLogTable; + } + + /** + * Sets the log table for the transformation. + * + * @param the + * log table to set + */ + public void setTransLogTable( TransLogTable transLogTable ) { + this.transLogTable = transLogTable; + } + + /** + * Gets the performance log table for the transformation. + * + * @return the performance log table for the transformation + */ + public PerformanceLogTable getPerformanceLogTable() { + return performanceLogTable; + } + + /** + * Sets the performance log table for the transformation. + * + * @param performanceLogTable + * the performance log table to set + */ + public void setPerformanceLogTable( PerformanceLogTable performanceLogTable ) { + this.performanceLogTable = performanceLogTable; + } + + /** + * Gets the step log table for the transformation. + * + * @return the step log table for the transformation + */ + public StepLogTable getStepLogTable() { + return stepLogTable; + } + + /** + * Sets the step log table for the transformation. + * + * @param stepLogTable + * the step log table to set + */ + public void setStepLogTable( StepLogTable stepLogTable ) { + this.stepLogTable = stepLogTable; + } + + /** + * Gets a list of the log tables (transformation, step, performance, channel) for the transformation. + * + * @return a list of LogTableInterfaces for the transformation + */ + public List getLogTables() { + List logTables = new ArrayList(); + logTables.add( transLogTable ); + logTables.add( stepLogTable ); + logTables.add( performanceLogTable ); + logTables.add( channelLogTable ); + logTables.add( metricsLogTable ); + return logTables; + } + + /** + * Gets the transformation type. + * + * @return the transformationType + */ + public TransformationType getTransformationType() { + return transformationType; + } + + /** + * Sets the transformation type. + * + * @param transformationType + * the transformationType to set + */ + public void setTransformationType( TransformationType transformationType ) { + this.transformationType = transformationType; + } + + /** + * Utility method to write the XML of this transformation to a file, mostly for testing purposes. + * + * @param filename + * The filename to save to + * @throws KettleXMLException + * in case something goes wrong. + */ + public void writeXML( String filename ) throws KettleXMLException { + FileOutputStream fos = null; + try { + fos = new FileOutputStream( filename ); + fos.write( XMLHandler.getXMLHeader().getBytes( Const.XML_ENCODING ) ); + fos.write( getXML().getBytes( Const.XML_ENCODING ) ); + } catch ( Exception e ) { + throw new KettleXMLException( "Unable to save to XML file '" + filename + "'", e ); + } finally { + if ( fos != null ) { + try { + fos.close(); + } catch ( IOException e ) { + throw new KettleXMLException( "Unable to close file '" + filename + "'", e ); + } + } + } + } + + /** + * Checks whether the transformation has repository references. 
+ * + * @return true if the transformation has repository references, false otherwise + */ + public boolean hasRepositoryReferences() { + for ( StepMeta stepMeta : steps ) { + if ( stepMeta.getStepMetaInterface().hasRepositoryReferences() ) { + return true; + } + } + return false; + } + + /** + * Looks up the references after a repository import. + * + * @param repository + * the repository to reference. + * @throws KettleException + * the kettle exception + */ + public void lookupRepositoryReferences( Repository repository ) throws KettleException { + for ( StepMeta stepMeta : steps ) { + stepMeta.getStepMetaInterface().lookupRepositoryReferences( repository ); + } + } + + /** + * @return the metricsLogTable + */ + public MetricsLogTable getMetricsLogTable() { + return metricsLogTable; + } + + /** + * @param metricsLogTable + * the metricsLogTable to set + */ + public void setMetricsLogTable( MetricsLogTable metricsLogTable ) { + this.metricsLogTable = metricsLogTable; + } + + @Override + public boolean isGatheringMetrics() { + return log.isGatheringMetrics(); + } + + @Override + public void setGatheringMetrics( boolean gatheringMetrics ) { + log.setGatheringMetrics( gatheringMetrics ); + } + + @Override + public boolean isForcingSeparateLogging() { + return log.isForcingSeparateLogging(); + } + + @Override + public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { + log.setForcingSeparateLogging( forcingSeparateLogging ); + } + + /** + * This method needs to be called to store those objects which are used and referenced in the transformation metadata + * but not saved in the XML serialization. For example, the Kettle data service definition is referenced by name but + * not stored when getXML() is called. + * + * @param metaStore + * The store to save to + * @throws MetaStoreException + * in case there is an error. 
+ */ + public void saveMetaStoreObjects( Repository repository, IMetaStore metaStore ) throws MetaStoreException { + + } + + public void addStepChangeListener( StepMetaChangeListenerInterface listener ) { + stepChangeListeners.add( listener ); + } + + public void addStepChangeListener( int p, StepMetaChangeListenerInterface list ) { + int indexListener = -1; + int indexListenerRemove = -1; + StepMeta rewriteStep = steps.get( p ); + StepMetaInterface iface = rewriteStep.getStepMetaInterface(); + if ( iface instanceof StepMetaChangeListenerInterface ) { + for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) { + indexListener++; + if ( listener.equals( iface ) ) { + indexListenerRemove = indexListener; + } + } + if ( indexListenerRemove >= 0 ) { + stepChangeListeners.add( indexListenerRemove, list ); + } else if ( stepChangeListeners.size() == 0 && p == 0 ) { + stepChangeListeners.add( list ); + } + } + } + + public void removeStepChangeListener( StepMetaChangeListenerInterface list ) { + int indexListener = -1; + int indexListenerRemove = -1; + for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) { + indexListener++; + if ( listener.equals( list ) ) { + indexListenerRemove = indexListener; + } + } + if ( indexListenerRemove >= 0 ) { + stepChangeListeners.remove( indexListenerRemove ); + } + } + + public void notifyAllListeners( StepMeta oldMeta, StepMeta newMeta ) { + for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) { + listener.onStepChange( this, oldMeta, newMeta ); + } + } + + public boolean containsStepMeta( StepMeta stepMeta ) { + return steps.contains( stepMeta ); + } + + public List getMissingTrans() { + return missingTrans; + } + + public void addMissingTrans( MissingTrans trans ) { + if ( missingTrans == null ) { + missingTrans = new ArrayList(); + } + missingTrans.add( trans ); + } + + public void removeMissingTrans( MissingTrans trans ) { + if ( missingTrans != null && trans != null && missingTrans.contains( trans ) ) { + missingTrans.remove( trans ); + } + } + + public boolean hasMissingPlugins() { + return missingTrans != null && !missingTrans.isEmpty(); + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java new file mode 100644 index 0000000..d48f367 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java @@ -0,0 +1,215 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.trans.steps.append; + +import java.util.List; + +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleStepException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.TransMeta.TransformationType; +import org.pentaho.di.trans.step.BaseStepMeta; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepIOMeta; +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.Stream; +import org.pentaho.di.trans.step.errorhandling.StreamIcon; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/** + * @author Sven Boden + * @since 3-june-2007 + */ +public class AppendMeta extends BaseStepMeta implements StepMetaInterface { + private static Class PKG = Append.class; // for i18n purposes, needed by Translator2!! 
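+
+  /*
+   * Editor's note - illustrative sketch only, not part of the upstream class: the Append step keeps its
+   * "head" and "tail" source steps as the first and second INFO stream, so wiring it up in code means setting
+   * the two stream subjects and letting searchInfoAndTargetSteps() resolve them to StepMeta objects. The step
+   * names and the transMeta variable below are assumptions made for the example.
+   *
+   *   AppendMeta appendMeta = new AppendMeta();
+   *   List infoStreams = appendMeta.getStepIOMeta().getInfoStreams();
+   *   infoStreams.get( 0 ).setSubject( "Read current rows" );  // resolved and serialized as "head_name"
+   *   infoStreams.get( 1 ).setSubject( "Read archived rows" ); // resolved and serialized as "tail_name"
+   *   appendMeta.searchInfoAndTargetSteps( transMeta.getSteps() );
+   */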
+ + public AppendMeta() { + super(); // allocate BaseStepMeta + } + + public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { + readData( stepnode ); + } + + public Object clone() { + AppendMeta retval = (AppendMeta) super.clone(); + + return retval; + } + + public String getXML() { + StringBuilder retval = new StringBuilder(); + + List infoStreams = getStepIOMeta().getInfoStreams(); + retval.append( XMLHandler.addTagValue( "head_name", infoStreams.get( 0 ).getStepname() ) ); + retval.append( XMLHandler.addTagValue( "tail_name", infoStreams.get( 1 ).getStepname() ) ); + + return retval.toString(); + } + + private void readData( Node stepnode ) throws KettleXMLException { + try { + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get( 0 ); + StreamInterface tailStream = infoStreams.get( 1 ); + headStream.setSubject( XMLHandler.getTagValue( stepnode, "head_name" ) ); + tailStream.setSubject( XMLHandler.getTagValue( stepnode, "tail_name" ) ); + } catch ( Exception e ) { + throw new KettleXMLException( BaseMessages.getString( PKG, "AppendMeta.Exception.UnableToLoadStepInfo" ), e ); + } + } + + public void setDefault() { + } + + public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { + try { + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get( 0 ); + StreamInterface tailStream = infoStreams.get( 1 ); + headStream.setSubject( rep.getStepAttributeString( id_step, "head_name" ) ); + tailStream.setSubject( rep.getStepAttributeString( id_step, "tail_name" ) ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( + PKG, "AppendMeta.Exception.UnexpectedErrorReadingStepInfo" ), e ); + } + } + + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { + try { + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get( 0 ); + StreamInterface tailStream = infoStreams.get( 1 ); + rep.saveStepAttribute( id_transformation, id_step, "head_name", headStream.getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "tail_name", tailStream.getStepname() ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, "AppendMeta.Exception.UnableToSaveStepInfo" ) + + id_step, e ); + } + } + + @Override + public void searchInfoAndTargetSteps( List steps ) { + for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { + stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + } + } + + public boolean chosesTargetSteps() { + return false; + } + + public String[] getTargetSteps() { + return null; + } + + public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { + // We don't have any input fields here in "r" as they are all info fields. + // So we just take the info fields. 
+ // + if ( info != null ) { + if ( info.length > 0 && info[0] != null ) { + r.mergeRowMeta( info[0] ); + } + } + } + + public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + CheckResult cr; + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get( 0 ); + StreamInterface tailStream = infoStreams.get( 1 ); + + if ( headStream.getStepname() != null && tailStream.getStepname() != null ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "AppendMeta.CheckResult.SourceStepsOK" ), stepMeta ); + remarks.add( cr ); + } else if ( headStream.getStepname() == null && tailStream.getStepname() == null ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "AppendMeta.CheckResult.SourceStepsMissing" ), stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "AppendMeta.CheckResult.OneSourceStepMissing" ), stepMeta ); + remarks.add( cr ); + } + } + + public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans ) { + return new Append( stepMeta, stepDataInterface, cnr, tr, trans ); + } + + public StepDataInterface getStepData() { + return new AppendData(); + } + + /** + * Returns the Input/Output metadata for this step. + */ + public StepIOMetaInterface getStepIOMeta() { + if ( ioMeta == null ) { + + ioMeta = new StepIOMeta( true, true, false, false, false, false ); + + ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( + PKG, "AppendMeta.InfoStream.FirstStream.Description" ), StreamIcon.INFO, null ) ); + ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( + PKG, "AppendMeta.InfoStream.SecondStream.Description" ), StreamIcon.INFO, null ) ); + } + + return ioMeta; + } + + @Override + public void resetStepIoMeta() { + } + + public TransformationType[] getSupportedTransformationTypes() { + return new TransformationType[] { TransformationType.Normal, }; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java new file mode 100644 index 0000000..09189b5 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java @@ -0,0 +1,426 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.trans.steps.filterrows; + +import java.util.ArrayList; +import java.util.List; + +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Condition; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleStepException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMetaAndData; +import org.pentaho.di.core.row.ValueMetaInterface; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.step.BaseStepMeta; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepIOMeta; +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.Stream; +import org.pentaho.di.trans.step.errorhandling.StreamIcon; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/* + * Created on 02-jun-2003 + * + */ + +public class FilterRowsMeta extends BaseStepMeta implements StepMetaInterface { + private static Class PKG = FilterRowsMeta.class; // for i18n purposes, needed by Translator2!! + + /** + * This is the main condition for the complete filter. + * + * @since version 2.1 + */ + private Condition condition; + + public FilterRowsMeta() { + super(); // allocate BaseStepMeta + condition = new Condition(); + } + + public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { + readData( stepnode ); + } + + /** + * @return Returns the condition. + */ + public Condition getCondition() { + return condition; + } + + /** + * @param condition + * The condition to set. 
+ */ + public void setCondition( Condition condition ) { + this.condition = condition; + } + + public void allocate() { + condition = new Condition(); + } + + public Object clone() { + FilterRowsMeta retval = (FilterRowsMeta) super.clone(); + + if ( condition != null ) { + retval.condition = (Condition) condition.clone(); + } else { + retval.condition = null; + } + + return retval; + } + + public String getXML() throws KettleException { + StringBuffer retval = new StringBuffer( 200 ); + + List targetStreams = getStepIOMeta().getTargetStreams(); + retval.append( XMLHandler.addTagValue( "send_true_to", targetStreams.get( 0 ).getStepname() ) ); + retval.append( XMLHandler.addTagValue( "send_false_to", targetStreams.get( 1 ).getStepname() ) ); + retval.append( " " ).append( Const.CR ); + + if ( condition != null ) { + retval.append( condition.getXML() ); + } + + retval.append( " " ).append( Const.CR ); + + return retval.toString(); + } + + private void readData( Node stepnode ) throws KettleXMLException { + try { + List targetStreams = getStepIOMeta().getTargetStreams(); + + targetStreams.get( 0 ).setSubject( XMLHandler.getTagValue( stepnode, "send_true_to" ) ); + targetStreams.get( 1 ).setSubject( XMLHandler.getTagValue( stepnode, "send_false_to" ) ); + + Node compare = XMLHandler.getSubNode( stepnode, "compare" ); + Node condnode = XMLHandler.getSubNode( compare, "condition" ); + + // The new situation... + if ( condnode != null ) { + condition = new Condition( condnode ); + } else { + // Old style condition: Line1 OR Line2 OR Line3: @deprecated! + condition = new Condition(); + + int nrkeys = XMLHandler.countNodes( compare, "key" ); + if ( nrkeys == 1 ) { + Node knode = XMLHandler.getSubNodeByNr( compare, "key", 0 ); + + String key = XMLHandler.getTagValue( knode, "name" ); + String value = XMLHandler.getTagValue( knode, "value" ); + String field = XMLHandler.getTagValue( knode, "field" ); + String comparator = XMLHandler.getTagValue( knode, "condition" ); + + condition.setOperator( Condition.OPERATOR_NONE ); + condition.setLeftValuename( key ); + condition.setFunction( Condition.getFunction( comparator ) ); + condition.setRightValuename( field ); + condition.setRightExact( new ValueMetaAndData( "value", value ) ); + } else { + for ( int i = 0; i < nrkeys; i++ ) { + Node knode = XMLHandler.getSubNodeByNr( compare, "key", i ); + + String key = XMLHandler.getTagValue( knode, "name" ); + String value = XMLHandler.getTagValue( knode, "value" ); + String field = XMLHandler.getTagValue( knode, "field" ); + String comparator = XMLHandler.getTagValue( knode, "condition" ); + + Condition subc = new Condition(); + if ( i > 0 ) { + subc.setOperator( Condition.OPERATOR_OR ); + } else { + subc.setOperator( Condition.OPERATOR_NONE ); + } + subc.setLeftValuename( key ); + subc.setFunction( Condition.getFunction( comparator ) ); + subc.setRightValuename( field ); + subc.setRightExact( new ValueMetaAndData( "value", value ) ); + + condition.addCondition( subc ); + } + } + } + } catch ( Exception e ) { + throw new KettleXMLException( BaseMessages.getString( + PKG, "FilterRowsMeta.Exception..UnableToLoadStepInfoFromXML" ), e ); + } + } + + public void setDefault() { + allocate(); + } + + public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { + try { + allocate(); + + List targetStreams = getStepIOMeta().getTargetStreams(); + + targetStreams.get( 0 ).setSubject( rep.getStepAttributeString( id_step, "send_true_to" ) ); + targetStreams.get( 1 
).setSubject( rep.getStepAttributeString( id_step, "send_false_to" ) ); + + condition = rep.loadConditionFromStepAttribute( id_step, "id_condition" ); + + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( + PKG, "FilterRowsMeta.Exception.UnexpectedErrorInReadingStepInfoFromRepository" ), e ); + } + } + + @Override + public void searchInfoAndTargetSteps( List steps ) { + for ( StreamInterface stream : getStepIOMeta().getTargetStreams() ) { + stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + } + } + + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { + try { + if ( condition != null ) { + List targetStreams = getStepIOMeta().getTargetStreams(); + + rep.saveConditionStepAttribute( id_transformation, id_step, "id_condition", condition ); + rep.saveStepAttribute( id_transformation, id_step, "send_true_to", targetStreams.get( 0 ).getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "send_false_to", targetStreams.get( 1 ).getStepname() ); + } + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( + PKG, "FilterRowsMeta.Exception.UnableToSaveStepInfoToRepository" ) + + id_step, e ); + } + } + + public void getFields( RowMetaInterface rowMeta, String origin, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { + // Clear the sortedDescending flag on fields used within the condition - otherwise the comparisons will be + // inverted!! + String[] conditionField = condition.getUsedFields(); + for ( int i = 0; i < conditionField.length; i++ ) { + int idx = rowMeta.indexOfValue( conditionField[i] ); + if ( idx >= 0 ) { + ValueMetaInterface valueMeta = rowMeta.getValueMeta( idx ); + valueMeta.setSortedDescending( false ); + } + } + } + + public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + CheckResult cr; + String error_message = ""; + + List targetStreams = getStepIOMeta().getTargetStreams(); + + if ( targetStreams.get( 0 ).getStepname() != null ) { + int trueTargetIdx = Const.indexOfString( targetStreams.get( 0 ).getStepname(), output ); + if ( trueTargetIdx < 0 ) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.TargetStepInvalid", "true", targetStreams + .get( 0 ).getStepname() ), stepMeta ); + remarks.add( cr ); + } + } + + if ( targetStreams.get( 1 ).getStepname() != null ) { + int falseTargetIdx = Const.indexOfString( targetStreams.get( 1 ).getStepname(), output ); + if ( falseTargetIdx < 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "FilterRowsMeta.CheckResult.TargetStepInvalid", "false", targetStreams + .get( 1 ).getStepname() ), stepMeta ); + remarks.add( cr ); + } + } + + if ( condition.isEmpty() ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.NoConditionSpecified" ), stepMeta ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.ConditionSpecified" ), stepMeta ); + } + remarks.add( cr ); + + // Look up fields in the input stream + if ( prev != null && 
prev.size() > 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.StepReceivingFields", prev.size() + "" ), stepMeta ); + remarks.add( cr ); + + List orphanFields = getOrphanFields( condition, prev ); + if ( orphanFields.size() > 0 ) { + error_message = BaseMessages.getString( PKG, "FilterRowsMeta.CheckResult.FieldsNotFoundFromPreviousStep" ) + + Const.CR; + for ( String field : orphanFields ) { + error_message += "\t\t" + field + Const.CR; + } + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( PKG, + "FilterRowsMeta.CheckResult.AllFieldsFoundInInputStream" ), stepMeta ); + } + remarks.add( cr ); + } else { + error_message = + BaseMessages.getString( PKG, "FilterRowsMeta.CheckResult.CouldNotReadFieldsFromPreviousStep" ) + + Const.CR; + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta ); + remarks.add( cr ); + } + + // See if we have input streams leading to this step! + if ( input.length > 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.StepReceivingInfoFromOtherSteps" ), stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.NoInputReceivedFromOtherSteps" ), stepMeta ); + remarks.add( cr ); + } + } + + public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans ) { + return new FilterRows( stepMeta, stepDataInterface, cnr, tr, trans ); + } + + public StepDataInterface getStepData() { + return new FilterRowsData(); + } + + /** + * Returns the Input/Output metadata for this step. + */ + public StepIOMetaInterface getStepIOMeta() { + if ( ioMeta == null ) { + + ioMeta = new StepIOMeta( true, true, false, false, false, false ); + + ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( + PKG, "FilterRowsMeta.InfoStream.True.Description" ), StreamIcon.TRUE, null ) ); + ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( + PKG, "FilterRowsMeta.InfoStream.False.Description" ), StreamIcon.FALSE, null ) ); + } + + return ioMeta; + } + + @Override + public void resetStepIoMeta() { + } + + /** + * When an optional stream is selected, this method is called to handled the ETL metadata implications of that. + * + * @param stream + * The optional stream to handle. + */ + public void handleStreamSelection( StreamInterface stream ) { + // This step targets another step. + // Make sure that we don't specify the same step for true and false... 
+ // If the user requests false, we blank out true and vice versa + // + List targets = getStepIOMeta().getTargetStreams(); + int index = targets.indexOf( stream ); + if ( index == 0 ) { + // True + // + StepMeta falseStep = targets.get( 1 ).getStepMeta(); + if ( falseStep != null && falseStep.equals( stream.getStepMeta() ) ) { + targets.get( 1 ).setStepMeta( null ); + } + } + if ( index == 1 ) { + // False + // + StepMeta trueStep = targets.get( 0 ).getStepMeta(); + if ( trueStep != null && trueStep.equals( stream.getStepMeta() ) ) { + targets.get( 0 ).setStepMeta( null ); + } + } + } + + @Override + public boolean excludeFromCopyDistributeVerification() { + return true; + } + + /** + * Get non-existing referenced input fields + * @param condition + * @param prev + * @return + */ + public List getOrphanFields( Condition condition, RowMetaInterface prev ) { + List orphans = new ArrayList( ); + if ( condition == null || prev == null ) { + return orphans; + } + String[] key = condition.getUsedFields(); + for ( int i = 0; i < key.length; i++ ) { + if ( Const.isEmpty( key[i] ) ) { + continue; + } + ValueMetaInterface v = prev.searchValueMeta( key[i] ); + if ( v == null ) { + orphans.add( key[i] ); + } + } + return orphans; + } + +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java new file mode 100644 index 0000000..c3b74e5 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java @@ -0,0 +1,270 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.trans.steps.javafilter; + +import java.util.List; + +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.step.BaseStepMeta; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepIOMeta; +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.Stream; +import org.pentaho.di.trans.step.errorhandling.StreamIcon; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/** + * Contains the meta-data for the java filter step: calculates conditions using Janino + * + * Created on 30-oct-2009 + */ +public class JavaFilterMeta extends BaseStepMeta implements StepMetaInterface { + private static Class PKG = JavaFilterMeta.class; // for i18n purposes, needed by Translator2!! 
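+
+  /*
+   * Editor's note - illustrative sketch only, not part of the upstream class: the Java Filter step evaluates a
+   * single Janino expression per row and routes the row to the "true" or "false" target step. A minimal
+   * programmatic setup looks like the snippet below; the step names and the condition text are assumptions
+   * made for the example.
+   *
+   *   JavaFilterMeta filterMeta = new JavaFilterMeta();
+   *   filterMeta.setCondition( "amount > 1000" );
+   *   List targetStreams = filterMeta.getStepIOMeta().getTargetStreams();
+   *   targetStreams.get( 0 ).setSubject( "Large amounts" );   // serialized as "send_true_to"
+   *   targetStreams.get( 1 ).setSubject( "Everything else" ); // serialized as "send_false_to"
+   */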
+ + /** The formula calculations to be performed */ + private String condition; + + public JavaFilterMeta() { + super(); // allocate BaseStepMeta + } + + public String getCondition() { + return condition; + } + + public void setCondition( String condition ) { + this.condition = condition; + } + + public void allocate( int nrCalcs ) { + } + + public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { + List targetStreams = getStepIOMeta().getTargetStreams(); + + targetStreams.get( 0 ).setSubject( XMLHandler.getTagValue( stepnode, "send_true_to" ) ); + targetStreams.get( 1 ).setSubject( XMLHandler.getTagValue( stepnode, "send_false_to" ) ); + + condition = XMLHandler.getTagValue( stepnode, "condition" ); + } + + public String getXML() { + StringBuffer retval = new StringBuffer(); + + List targetStreams = getStepIOMeta().getTargetStreams(); + retval.append( XMLHandler.addTagValue( "send_true_to", targetStreams.get( 0 ).getStepname() ) ); + retval.append( XMLHandler.addTagValue( "send_false_to", targetStreams.get( 1 ).getStepname() ) ); + + retval.append( XMLHandler.addTagValue( "condition", condition ) ); + + return retval.toString(); + } + + public boolean equals( Object obj ) { + if ( obj != null && ( obj.getClass().equals( this.getClass() ) ) ) { + JavaFilterMeta m = (JavaFilterMeta) obj; + return ( getXML() == m.getXML() ); + } + + return false; + } + + public Object clone() { + JavaFilterMeta retval = (JavaFilterMeta) super.clone(); + return retval; + } + + public void setDefault() { + condition = "true"; + } + + public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { + List targetStreams = getStepIOMeta().getTargetStreams(); + + targetStreams.get( 0 ).setSubject( rep.getStepAttributeString( id_step, "send_true_to" ) ); + targetStreams.get( 1 ).setSubject( rep.getStepAttributeString( id_step, "send_false_to" ) ); + + condition = rep.getStepAttributeString( id_step, "condition" ); + } + + @Override + public void searchInfoAndTargetSteps( List steps ) { + for ( StreamInterface stream : getStepIOMeta().getTargetStreams() ) { + stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + } + } + + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { + List targetStreams = getStepIOMeta().getTargetStreams(); + + rep.saveStepAttribute( id_transformation, id_step, "send_true_to", targetStreams.get( 0 ).getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "send_false_to", targetStreams.get( 1 ).getStepname() ); + + rep.saveStepAttribute( id_transformation, id_step, "condition", condition ); + } + + public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + CheckResult cr; + String error_message = ""; + + List targetStreams = getStepIOMeta().getTargetStreams(); + + if ( targetStreams.get( 0 ).getStepname() != null && targetStreams.get( 1 ).getStepname() != null ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.BothTrueAndFalseStepSpecified" ), stepMeta ); + remarks.add( cr ); + } else if ( targetStreams.get( 0 ).getStepname() == null && targetStreams.get( 1 ).getStepname() == null ) { + cr = + new CheckResult( 
CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.NeitherTrueAndFalseStepSpecified" ), stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.PlsSpecifyBothTrueAndFalseStep" ), stepMeta ); + remarks.add( cr ); + } + + if ( targetStreams.get( 0 ).getStepname() != null ) { + int trueTargetIdx = Const.indexOfString( targetStreams.get( 0 ).getStepname(), output ); + if ( trueTargetIdx < 0 ) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.TargetStepInvalid", "true", targetStreams + .get( 0 ).getStepname() ), stepMeta ); + remarks.add( cr ); + } + } + + if ( targetStreams.get( 1 ).getStepname() != null ) { + int falseTargetIdx = Const.indexOfString( targetStreams.get( 1 ).getStepname(), output ); + if ( falseTargetIdx < 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString( PKG, "JavaFilterMeta.CheckResult.TargetStepInvalid", "false", targetStreams + .get( 1 ).getStepname() ), stepMeta ); + remarks.add( cr ); + } + } + + if ( Const.isEmpty( condition ) ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.NoConditionSpecified" ), stepMeta ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.ConditionSpecified" ), stepMeta ); + } + remarks.add( cr ); + + // Look up fields in the input stream + if ( prev != null && prev.size() > 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.StepReceivingFields", prev.size() + "" ), stepMeta ); + remarks.add( cr ); + + // What fields are used in the condition? + // TODO: verify condition, parse it + // + } else { + error_message = + BaseMessages.getString( PKG, "JavaFilterMeta.CheckResult.CouldNotReadFieldsFromPreviousStep" ) + + Const.CR; + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta ); + remarks.add( cr ); + } + + // See if we have input streams leading to this step! + if ( input.length > 0 ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.StepReceivingInfoFromOtherSteps" ), stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.NoInputReceivedFromOtherSteps" ), stepMeta ); + remarks.add( cr ); + } + } + + public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans ) { + return new JavaFilter( stepMeta, stepDataInterface, cnr, tr, trans ); + } + + public StepDataInterface getStepData() { + return new JavaFilterData(); + } + + /** + * Returns the Input/Output metadata for this step. 
+ */ + public StepIOMetaInterface getStepIOMeta() { + if ( ioMeta == null ) { + + ioMeta = new StepIOMeta( true, true, false, false, false, false ); + + ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( + PKG, "JavaFilterMeta.InfoStream.True.Description" ), StreamIcon.TRUE, null ) ); + ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( + PKG, "JavaFilterMeta.InfoStream.False.Description" ), StreamIcon.FALSE, null ) ); + } + + return ioMeta; + } + + @Override + public void resetStepIoMeta() { + } + + @Override + public boolean excludeFromCopyDistributeVerification() { + return true; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java new file mode 100644 index 0000000..8d307f5 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java @@ -0,0 +1,329 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.trans.steps.mergejoin; + +import java.util.List; + +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleStepException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMetaInterface; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.TransMeta.TransformationType; +import org.pentaho.di.trans.step.BaseStepMeta; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepIOMeta; +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.Stream; +import org.pentaho.di.trans.step.errorhandling.StreamIcon; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/* + * @author Biswapesh + * @since 24-nov-2006 + */ + +public class MergeJoinMeta extends BaseStepMeta implements StepMetaInterface { + private static Class PKG = MergeJoinMeta.class; // for i18n purposes, needed by Translator2!! + + public static final String[] join_types = { "INNER", "LEFT OUTER", "RIGHT OUTER", "FULL OUTER" }; + public static final boolean[] one_optionals = { false, false, true, true }; + public static final boolean[] two_optionals = { false, true, false, true }; + + private String joinType; + + private String[] keyFields1; + private String[] keyFields2; + + /** + * The supported join types are INNER, LEFT OUTER, RIGHT OUTER and FULL OUTER + * + * @return The type of join + */ + public String getJoinType() { + return joinType; + } + + /** + * Sets the type of join + * + * @param joinType The type of join, e.g. INNER/FULL OUTER + */ + public void setJoinType( String joinType ) { + this.joinType = joinType; + } + + /** + * @return Returns the keyFields1. + */ + public String[] getKeyFields1() { + return keyFields1; + } + + /** + * @param keyFields1 The keyFields1 to set. + */ + public void setKeyFields1( String[] keyFields1 ) { + this.keyFields1 = keyFields1; + } + + /** + * @return Returns the keyFields2. + */ + public String[] getKeyFields2() { + return keyFields2; + } + + /** + * @param keyFields2 The keyFields2 to set. 
+ */
+ public void setKeyFields2( String[] keyFields2 ) {
+ this.keyFields2 = keyFields2;
+ }
+
+ public boolean excludeFromRowLayoutVerification() {
+ return true;
+ }
+
+ public MergeJoinMeta() {
+ super(); // allocate BaseStepMeta
+ }
+
+ public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException {
+ readData( stepnode );
+ }
+
+ public void allocate( int nrKeys1, int nrKeys2 ) {
+ keyFields1 = new String[nrKeys1];
+ keyFields2 = new String[nrKeys2];
+ }
+
+ public Object clone() {
+ MergeJoinMeta retval = (MergeJoinMeta) super.clone();
+ int nrKeys1 = keyFields1.length;
+ int nrKeys2 = keyFields2.length;
+ retval.allocate( nrKeys1, nrKeys2 );
+ System.arraycopy( keyFields1, 0, retval.keyFields1, 0, nrKeys1 );
+ System.arraycopy( keyFields2, 0, retval.keyFields2, 0, nrKeys2 );
+
+ return retval;
+ }
+
+ public String getXML() {
+ StringBuffer retval = new StringBuffer();
+
+ List<StreamInterface> infoStreams = getStepIOMeta().getInfoStreams();
+
+ retval.append( XMLHandler.addTagValue( "join_type", getJoinType() ) );
+ retval.append( XMLHandler.addTagValue( "step1", infoStreams.get( 0 ).getStepname() ) );
+ retval.append( XMLHandler.addTagValue( "step2", infoStreams.get( 1 ).getStepname() ) );
+
+ retval.append( "    <keys_1>" + Const.CR );
+ for ( int i = 0; i < keyFields1.length; i++ ) {
+ retval.append( "      " + XMLHandler.addTagValue( "key", keyFields1[i] ) );
+ }
+ retval.append( "    </keys_1>" + Const.CR );
+
+ retval.append( "    <keys_2>" + Const.CR );
+ for ( int i = 0; i < keyFields2.length; i++ ) {
+ retval.append( "      " + XMLHandler.addTagValue( "key", keyFields2[i] ) );
+ }
+ retval.append( "    </keys_2>" + Const.CR );
+
+ return retval.toString();
+ }
+
+ private void readData( Node stepnode ) throws KettleXMLException {
+ try {
+
+ Node keysNode1 = XMLHandler.getSubNode( stepnode, "keys_1" );
+ Node keysNode2 = XMLHandler.getSubNode( stepnode, "keys_2" );
+
+ int nrKeys1 = XMLHandler.countNodes( keysNode1, "key" );
+ int nrKeys2 = XMLHandler.countNodes( keysNode2, "key" );
+
+ allocate( nrKeys1, nrKeys2 );
+
+ for ( int i = 0; i < nrKeys1; i++ ) {
+ Node keynode = XMLHandler.getSubNodeByNr( keysNode1, "key", i );
+ keyFields1[i] = XMLHandler.getNodeValue( keynode );
+ }
+
+ for ( int i = 0; i < nrKeys2; i++ ) {
+ Node keynode = XMLHandler.getSubNodeByNr( keysNode2, "key", i );
+ keyFields2[i] = XMLHandler.getNodeValue( keynode );
+ }
+
+ List<StreamInterface> infoStreams = getStepIOMeta().getInfoStreams();
+ infoStreams.get( 0 ).setSubject( XMLHandler.getTagValue( stepnode, "step1" ) );
+ infoStreams.get( 1 ).setSubject( XMLHandler.getTagValue( stepnode, "step2" ) );
+ joinType = XMLHandler.getTagValue( stepnode, "join_type" );
+ } catch ( Exception e ) {
+ throw new KettleXMLException(
+ BaseMessages.getString( PKG, "MergeJoinMeta.Exception.UnableToLoadStepInfo" ), e );
+ }
+ }
+
+ public void setDefault() {
+ joinType = join_types[0];
+ allocate( 0, 0 );
+ }
+
+ public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List<DatabaseMeta> databases ) throws KettleException {
+ try {
+ int nrKeys1 = rep.countNrStepAttributes( id_step, "keys_1" );
+ int nrKeys2 = rep.countNrStepAttributes( id_step, "keys_2" );
+
+ allocate( nrKeys1, nrKeys2 );
+
+ for ( int i = 0; i < nrKeys1; i++ ) {
+ keyFields1[i] = rep.getStepAttributeString( id_step, i, "keys_1" );
+ }
+ for ( int i = 0; i < nrKeys2; i++ ) {
+ keyFields2[i] = rep.getStepAttributeString( id_step, i, "keys_2" );
+ }
+
+ List<StreamInterface> infoStreams = getStepIOMeta().getInfoStreams();
+ infoStreams.get( 0 ).setSubject( rep.getStepAttributeString( id_step, "step1" ) );
+ infoStreams.get( 1 ).setSubject( rep.getStepAttributeString( id_step, "step2" ) ); + joinType = rep.getStepAttributeString( id_step, "join_type" ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( + PKG, "MergeJoinMeta.Exception.UnexpectedErrorReadingStepInfo" ), e ); + } + } + + @Override + public void searchInfoAndTargetSteps( List steps ) { + for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { + stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + } + } + + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { + try { + for ( int i = 0; i < keyFields1.length; i++ ) { + rep.saveStepAttribute( id_transformation, id_step, i, "keys_1", keyFields1[i] ); + } + + for ( int i = 0; i < keyFields2.length; i++ ) { + rep.saveStepAttribute( id_transformation, id_step, i, "keys_2", keyFields2[i] ); + } + + List infoStreams = getStepIOMeta().getInfoStreams(); + + rep.saveStepAttribute( id_transformation, id_step, "step1", infoStreams.get( 0 ).getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "step2", infoStreams.get( 1 ).getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "join_type", getJoinType() ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, "MergeJoinMeta.Exception.UnableToSaveStepInfo" ) + + id_step, e ); + } + } + + public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + /* + * @todo Need to check for the following: 1) Join type must be one of INNER / LEFT OUTER / RIGHT OUTER / FULL OUTER + * 2) Number of input streams must be two (for now at least) 3) The field names of input streams must be unique + */ + CheckResult cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages.getString( + PKG, "MergeJoinMeta.CheckResult.StepNotVerified" ), stepMeta ); + remarks.add( cr ); + } + + public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { + // We don't have any input fields here in "r" as they are all info fields. + // So we just merge in the info fields. + // + if ( info != null ) { + for ( int i = 0; i < info.length; i++ ) { + if ( info[i] != null ) { + r.mergeRowMeta( info[i], name ); + } + } + } + + for ( int i = 0; i < r.size(); i++ ) { + ValueMetaInterface vmi = r.getValueMeta( i ); + if ( vmi != null && Const.isEmpty( vmi.getName() ) ) { + vmi.setOrigin( name ); + } + } + return; + } + + public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans ) { + return new MergeJoin( stepMeta, stepDataInterface, cnr, tr, trans ); + } + + public StepDataInterface getStepData() { + return new MergeJoinData(); + } + + /** + * Returns the Input/Output metadata for this step. The generator step only produces output, does not accept input! 
+ */ + public StepIOMetaInterface getStepIOMeta() { + if ( ioMeta == null ) { + + ioMeta = new StepIOMeta( true, true, false, false, false, false ); + + ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeJoinMeta.InfoStream.FirstStream.Description" ), StreamIcon.INFO, null ) ); + ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeJoinMeta.InfoStream.SecondStream.Description" ), StreamIcon.INFO, null ) ); + } + + return ioMeta; + } + + public void resetStepIoMeta() { + // Don't reset! + } + + public TransformationType[] getSupportedTransformationTypes() { + return new TransformationType[]{ TransformationType.Normal, }; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java new file mode 100644 index 0000000..aebf062 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java @@ -0,0 +1,358 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.trans.steps.mergerows; + +import java.util.List; + +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleStepException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMeta; +import org.pentaho.di.core.row.ValueMetaInterface; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.TransMeta.TransformationType; +import org.pentaho.di.trans.step.BaseStepMeta; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepIOMeta; +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.Stream; +import org.pentaho.di.trans.step.errorhandling.StreamIcon; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +/* + * Created on 02-jun-2003 + * + */ + +public class MergeRowsMeta extends BaseStepMeta implements StepMetaInterface { + private static Class PKG = MergeRowsMeta.class; // for i18n purposes, needed by Translator2!! + + private String flagField; + + private String[] keyFields; + private String[] valueFields; + + /** + * @return Returns the keyFields. + */ + public String[] getKeyFields() { + return keyFields; + } + + /** + * @param keyFields + * The keyFields to set. + */ + public void setKeyFields( String[] keyFields ) { + this.keyFields = keyFields; + } + + /** + * @return Returns the valueFields. + */ + public String[] getValueFields() { + return valueFields; + } + + /** + * @param valueFields + * The valueFields to set. + */ + public void setValueFields( String[] valueFields ) { + this.valueFields = valueFields; + } + + public MergeRowsMeta() { + super(); // allocate BaseStepMeta + } + + public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { + readData( stepnode ); + } + + /** + * @return Returns the flagField. + */ + public String getFlagField() { + return flagField; + } + + /** + * @param flagField + * The flagField to set. 
+ */ + public void setFlagField( String flagField ) { + this.flagField = flagField; + } + + public void allocate( int nrKeys, int nrValues ) { + keyFields = new String[nrKeys]; + valueFields = new String[nrValues]; + } + + public Object clone() { + MergeRowsMeta retval = (MergeRowsMeta) super.clone(); + int nrKeys = keyFields.length; + int nrValues = valueFields.length; + retval.allocate( nrKeys, nrValues ); + System.arraycopy( keyFields, 0, retval.keyFields, 0, nrKeys ); + System.arraycopy( valueFields, 0, retval.valueFields, 0, nrValues ); + return retval; + } + + public String getXML() { + StringBuffer retval = new StringBuffer(); + + retval.append( " " + Const.CR ); + for ( int i = 0; i < keyFields.length; i++ ) { + retval.append( " " + XMLHandler.addTagValue( "key", keyFields[i] ) ); + } + retval.append( " " + Const.CR ); + + retval.append( " " + Const.CR ); + for ( int i = 0; i < valueFields.length; i++ ) { + retval.append( " " + XMLHandler.addTagValue( "value", valueFields[i] ) ); + } + retval.append( " " + Const.CR ); + + retval.append( XMLHandler.addTagValue( "flag_field", flagField ) ); + + List infoStreams = getStepIOMeta().getInfoStreams(); + retval.append( XMLHandler.addTagValue( "reference", infoStreams.get( 0 ).getStepname() ) ); + retval.append( XMLHandler.addTagValue( "compare", infoStreams.get( 1 ).getStepname() ) ); + retval.append( " " + Const.CR ); + + retval.append( " " + Const.CR ); + + return retval.toString(); + } + + private void readData( Node stepnode ) throws KettleXMLException { + try { + + Node keysnode = XMLHandler.getSubNode( stepnode, "keys" ); + Node valuesnode = XMLHandler.getSubNode( stepnode, "values" ); + + int nrKeys = XMLHandler.countNodes( keysnode, "key" ); + int nrValues = XMLHandler.countNodes( valuesnode, "value" ); + + allocate( nrKeys, nrValues ); + + for ( int i = 0; i < nrKeys; i++ ) { + Node keynode = XMLHandler.getSubNodeByNr( keysnode, "key", i ); + keyFields[i] = XMLHandler.getNodeValue( keynode ); + } + + for ( int i = 0; i < nrValues; i++ ) { + Node valuenode = XMLHandler.getSubNodeByNr( valuesnode, "value", i ); + valueFields[i] = XMLHandler.getNodeValue( valuenode ); + } + + flagField = XMLHandler.getTagValue( stepnode, "flag_field" ); + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get( 0 ); + StreamInterface compareStream = infoStreams.get( 1 ); + + compareStream.setSubject( XMLHandler.getTagValue( stepnode, "compare" ) ); + referenceStream.setSubject( XMLHandler.getTagValue( stepnode, "reference" ) ); + } catch ( Exception e ) { + throw new KettleXMLException( + BaseMessages.getString( PKG, "MergeRowsMeta.Exception.UnableToLoadStepInfo" ), e ); + } + } + + public void setDefault() { + flagField = "flagfield"; + allocate( 0, 0 ); + } + + public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { + try { + int nrKeys = rep.countNrStepAttributes( id_step, "key_field" ); + int nrValues = rep.countNrStepAttributes( id_step, "value_field" ); + + allocate( nrKeys, nrValues ); + + for ( int i = 0; i < nrKeys; i++ ) { + keyFields[i] = rep.getStepAttributeString( id_step, i, "key_field" ); + } + for ( int i = 0; i < nrValues; i++ ) { + valueFields[i] = rep.getStepAttributeString( id_step, i, "value_field" ); + } + + flagField = rep.getStepAttributeString( id_step, "flag_field" ); + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get( 0 ); + StreamInterface compareStream 
= infoStreams.get( 1 ); + + referenceStream.setSubject( rep.getStepAttributeString( id_step, "reference" ) ); + compareStream.setSubject( rep.getStepAttributeString( id_step, "compare" ) ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( + PKG, "MergeRowsMeta.Exception.UnexpectedErrorReadingStepInfo" ), e ); + } + } + + @Override + public void searchInfoAndTargetSteps( List steps ) { + for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { + stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + } + } + + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { + try { + for ( int i = 0; i < keyFields.length; i++ ) { + rep.saveStepAttribute( id_transformation, id_step, i, "key_field", keyFields[i] ); + } + + for ( int i = 0; i < valueFields.length; i++ ) { + rep.saveStepAttribute( id_transformation, id_step, i, "value_field", valueFields[i] ); + } + + rep.saveStepAttribute( id_transformation, id_step, "flag_field", flagField ); + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get( 0 ); + StreamInterface compareStream = infoStreams.get( 1 ); + + rep.saveStepAttribute( id_transformation, id_step, "reference", referenceStream.getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "compare", compareStream.getStepname() ); + } catch ( Exception e ) { + throw new KettleException( BaseMessages.getString( PKG, "MergeRowsMeta.Exception.UnableToSaveStepInfo" ) + + id_step, e ); + } + } + + public boolean chosesTargetSteps() { + return false; + } + + public String[] getTargetSteps() { + return null; + } + + public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { + // We don't have any input fields here in "r" as they are all info fields. + // So we just merge in the info fields. 
+ // + if ( info != null ) { + boolean found = false; + for ( int i = 0; i < info.length && !found; i++ ) { + if ( info[i] != null ) { + r.mergeRowMeta( info[i] ); + found = true; + } + } + } + + if ( Const.isEmpty( flagField ) ) { + throw new KettleStepException( BaseMessages.getString( PKG, "MergeRowsMeta.Exception.FlagFieldNotSpecified" ) ); + } + ValueMetaInterface flagFieldValue = new ValueMeta( flagField, ValueMetaInterface.TYPE_STRING ); + flagFieldValue.setOrigin( name ); + r.addValueMeta( flagFieldValue ); + + } + + public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + CheckResult cr; + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get( 0 ); + StreamInterface compareStream = infoStreams.get( 1 ); + + if ( referenceStream.getStepname() != null && compareStream.getStepname() != null ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "MergeRowsMeta.CheckResult.SourceStepsOK" ), stepMeta ); + remarks.add( cr ); + } else if ( referenceStream.getStepname() == null && compareStream.getStepname() == null ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "MergeRowsMeta.CheckResult.SourceStepsMissing" ), stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "MergeRowsMeta.CheckResult.OneSourceStepMissing" ), stepMeta ); + remarks.add( cr ); + } + } + + public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans ) { + return new MergeRows( stepMeta, stepDataInterface, cnr, tr, trans ); + } + + public StepDataInterface getStepData() { + return new MergeRowsData(); + } + + /** + * Returns the Input/Output metadata for this step. + */ + public StepIOMetaInterface getStepIOMeta() { + if ( ioMeta == null ) { + + ioMeta = new StepIOMeta( true, true, false, false, false, false ); + + ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeRowsMeta.InfoStream.FirstStream.Description" ), StreamIcon.INFO, null ) ); + ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeRowsMeta.InfoStream.SecondStream.Description" ), StreamIcon.INFO, null ) ); + } + + return ioMeta; + } + + public void resetStepIoMeta() { + } + + public TransformationType[] getSupportedTransformationTypes() { + return new TransformationType[] { TransformationType.Normal, }; + } + +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java new file mode 100644 index 0000000..8f5df7c --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java @@ -0,0 +1,572 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.trans.steps.tableinput; + +import org.pentaho.di.core.CheckResult; +import org.pentaho.di.core.CheckResultInterface; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.Database; +import org.pentaho.di.core.database.DatabaseMeta; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettlePluginException; +import org.pentaho.di.core.exception.KettleStepException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.row.RowDataUtil; +import org.pentaho.di.core.row.RowMeta; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMetaInterface; +import org.pentaho.di.core.row.value.ValueMetaFactory; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.ObjectId; +import org.pentaho.di.repository.Repository; +import org.pentaho.di.shared.SharedObjectInterface; +import org.pentaho.di.trans.DatabaseImpact; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransMeta; +import org.pentaho.di.trans.step.BaseStepMeta; +import org.pentaho.di.trans.step.StepDataInterface; +import org.pentaho.di.trans.step.StepIOMeta; +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepInjectionMetaEntry; +import org.pentaho.di.trans.step.StepInterface; +import org.pentaho.di.trans.step.StepMeta; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.Stream; +import org.pentaho.di.trans.step.errorhandling.StreamIcon; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.metastore.api.IMetaStore; +import org.w3c.dom.Node; + +import java.util.List; + +/* + * Created on 2-jun-2003 + * + */ +public class TableInputMeta extends BaseStepMeta implements StepMetaInterface { + private static Class PKG = TableInputMeta.class; // for i18n purposes, needed by Translator2!! + + private DatabaseMeta databaseMeta; + private String sql; + private String rowLimit; + + /** Should I execute once per row? */ + private boolean executeEachInputRow; + + private boolean variableReplacementActive; + + private boolean lazyConversionActive; + + public TableInputMeta() { + super(); + } + + /** + * @return Returns true if the step should be run per row + */ + public boolean isExecuteEachInputRow() { + return executeEachInputRow; + } + + /** + * @param oncePerRow + * true if the step should be run per row + */ + public void setExecuteEachInputRow( boolean oncePerRow ) { + this.executeEachInputRow = oncePerRow; + } + + /** + * @return Returns the database. + */ + public DatabaseMeta getDatabaseMeta() { + return databaseMeta; + } + + /** + * @param database + * The database to set. 
+ */
+ public void setDatabaseMeta( DatabaseMeta database ) {
+ this.databaseMeta = database;
+ }
+
+ /**
+ * @return Returns the rowLimit.
+ */
+ public String getRowLimit() {
+ return rowLimit;
+ }
+
+ /**
+ * @param rowLimit
+ * The rowLimit to set.
+ */
+ public void setRowLimit( String rowLimit ) {
+ this.rowLimit = rowLimit;
+ }
+
+ /**
+ * @return Returns the sql.
+ */
+ public String getSQL() {
+ return sql;
+ }
+
+ /**
+ * @param sql
+ * The sql to set.
+ */
+ public void setSQL( String sql ) {
+ this.sql = sql;
+ }
+
+ public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException {
+ readData( stepnode, databases );
+ }
+
+ public Object clone() {
+ TableInputMeta retval = (TableInputMeta) super.clone();
+ return retval;
+ }
+
+ private void readData( Node stepnode, List<DatabaseMeta> databases ) throws KettleXMLException {
+ try {
+ databaseMeta = DatabaseMeta.findDatabase( databases, XMLHandler.getTagValue( stepnode, "connection" ) );
+ sql = XMLHandler.getTagValue( stepnode, "sql" );
+ rowLimit = XMLHandler.getTagValue( stepnode, "limit" );
+
+ String lookupFromStepname = XMLHandler.getTagValue( stepnode, "lookup" );
+ StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 );
+ infoStream.setSubject( lookupFromStepname );
+
+ executeEachInputRow = "Y".equals( XMLHandler.getTagValue( stepnode, "execute_each_row" ) );
+ variableReplacementActive = "Y".equals( XMLHandler.getTagValue( stepnode, "variables_active" ) );
+ lazyConversionActive = "Y".equals( XMLHandler.getTagValue( stepnode, "lazy_conversion_active" ) );
+ } catch ( Exception e ) {
+ throw new KettleXMLException( "Unable to load step info from XML", e );
+ }
+ }
+
+ public void setDefault() {
+ databaseMeta = null;
+ sql = "SELECT <values> FROM <table name> WHERE <conditions>";
+ rowLimit = "0";
+ }
+
+ protected Database getDatabase() {
+ // Added for test purposes
+ return new Database( loggingObject, databaseMeta );
+ }
+
+ public void getFields( RowMetaInterface row, String origin, RowMetaInterface[] info, StepMeta nextStep,
+ VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException {
+ if ( databaseMeta == null ) {
+ return; // TODO: throw an exception here
+ }
+
+ boolean param = false;
+
+ Database db = getDatabase();
+ databases = new Database[] { db }; // keep track of it for canceling purposes...
+
+ // First try without connecting to the database...
(can be S L O W) + String sNewSQL = sql; + if ( isVariableReplacementActive() ) { + sNewSQL = db.environmentSubstitute( sql ); + if ( space != null ) { + sNewSQL = space.environmentSubstitute( sNewSQL ); + } + } + + RowMetaInterface add = null; + try { + add = db.getQueryFields( sNewSQL, param ); + } catch ( KettleDatabaseException dbe ) { + throw new KettleStepException( "Unable to get queryfields for SQL: " + Const.CR + sNewSQL, dbe ); + } + + if ( add != null ) { + for ( int i = 0; i < add.size(); i++ ) { + ValueMetaInterface v = add.getValueMeta( i ); + v.setOrigin( origin ); + } + row.addRowMeta( add ); + } else { + try { + db.connect(); + + RowMetaInterface paramRowMeta = null; + Object[] paramData = null; + + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); + if ( !Const.isEmpty( infoStream.getStepname() ) ) { + param = true; + if ( info.length >= 0 && info[0] != null ) { + paramRowMeta = info[0]; + paramData = RowDataUtil.allocateRowData( paramRowMeta.size() ); + } + } + + add = db.getQueryFields( sNewSQL, param, paramRowMeta, paramData ); + + if ( add == null ) { + return; + } + for ( int i = 0; i < add.size(); i++ ) { + ValueMetaInterface v = add.getValueMeta( i ); + v.setOrigin( origin ); + } + row.addRowMeta( add ); + } catch ( KettleException ke ) { + throw new KettleStepException( "Unable to get queryfields for SQL: " + Const.CR + sNewSQL, ke ); + } finally { + db.disconnect(); + } + } + if ( isLazyConversionActive() ) { + for ( int i = 0; i < row.size(); i++ ) { + ValueMetaInterface v = row.getValueMeta( i ); + try { + if ( v.getType() == ValueMetaInterface.TYPE_STRING ) { + ValueMetaInterface storageMeta = ValueMetaFactory.cloneValueMeta( v ); + storageMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL ); + v.setStorageMetadata( storageMeta ); + v.setStorageType( ValueMetaInterface.STORAGE_TYPE_BINARY_STRING ); + } + } catch ( KettlePluginException e ) { + throw new KettleStepException( "Unable to clone meta for lazy conversion: " + Const.CR + v, e ); + } + } + } + } + + public String getXML() { + StringBuffer retval = new StringBuffer(); + + retval.append( " " + + XMLHandler.addTagValue( "connection", databaseMeta == null ? 
"" : databaseMeta.getName() ) ); + retval.append( " " + XMLHandler.addTagValue( "sql", sql ) ); + retval.append( " " + XMLHandler.addTagValue( "limit", rowLimit ) ); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); + retval.append( " " + XMLHandler.addTagValue( "lookup", infoStream.getStepname() ) ); + retval.append( " " + XMLHandler.addTagValue( "execute_each_row", executeEachInputRow ) ); + retval.append( " " + XMLHandler.addTagValue( "variables_active", variableReplacementActive ) ); + retval.append( " " + XMLHandler.addTagValue( "lazy_conversion_active", lazyConversionActive ) ); + + return retval.toString(); + } + + public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { + try { + databaseMeta = rep.loadDatabaseMetaFromStepAttribute( id_step, "id_connection", databases ); + + sql = rep.getStepAttributeString( id_step, "sql" ); + rowLimit = rep.getStepAttributeString( id_step, "limit" ); + if ( rowLimit == null ) { + rowLimit = Long.toString( rep.getStepAttributeInteger( id_step, "limit" ) ); + } + + String lookupFromStepname = rep.getStepAttributeString( id_step, "lookup" ); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); + infoStream.setSubject( lookupFromStepname ); + + executeEachInputRow = rep.getStepAttributeBoolean( id_step, "execute_each_row" ); + variableReplacementActive = rep.getStepAttributeBoolean( id_step, "variables_active" ); + lazyConversionActive = rep.getStepAttributeBoolean( id_step, "lazy_conversion_active" ); + } catch ( Exception e ) { + throw new KettleException( "Unexpected error reading step information from the repository", e ); + } + } + + public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { + try { + rep.saveDatabaseMetaStepAttribute( id_transformation, id_step, "id_connection", databaseMeta ); + rep.saveStepAttribute( id_transformation, id_step, "sql", sql ); + rep.saveStepAttribute( id_transformation, id_step, "limit", rowLimit ); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); + rep.saveStepAttribute( id_transformation, id_step, "lookup", infoStream.getStepname() ); + rep.saveStepAttribute( id_transformation, id_step, "execute_each_row", executeEachInputRow ); + rep.saveStepAttribute( id_transformation, id_step, "variables_active", variableReplacementActive ); + rep.saveStepAttribute( id_transformation, id_step, "lazy_conversion_active", lazyConversionActive ); + + // Also, save the step-database relationship! + if ( databaseMeta != null ) { + rep.insertStepDatabase( id_transformation, id_step, databaseMeta.getObjectId() ); + } + } catch ( Exception e ) { + throw new KettleException( "Unable to save step information to the repository for id_step=" + id_step, e ); + } + } + + public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore ) { + CheckResult cr; + + if ( databaseMeta != null ) { + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "Connection exists", stepMeta ); + remarks.add( cr ); + + Database db = new Database( loggingObject, databaseMeta ); + db.shareVariablesWith( transMeta ); + databases = new Database[] { db }; // keep track of it for canceling purposes... 
+ + try { + db.connect(); + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "Connection to database OK", stepMeta ); + remarks.add( cr ); + + if ( sql != null && sql.length() != 0 ) { + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "SQL statement is entered", stepMeta ); + remarks.add( cr ); + } else { + cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, "SQL statement is missing.", stepMeta ); + remarks.add( cr ); + } + } catch ( KettleException e ) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "An error occurred: " + e.getMessage(), stepMeta ); + remarks.add( cr ); + } finally { + db.disconnect(); + } + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "Please select or create a connection to use", stepMeta ); + remarks.add( cr ); + } + + // See if we have an informative step... + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); + if ( !Const.isEmpty( infoStream.getStepname() ) ) { + boolean found = false; + for ( int i = 0; i < input.length; i++ ) { + if ( infoStream.getStepname().equalsIgnoreCase( input[i] ) ) { + found = true; + } + } + if ( found ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "Previous step to read info from [" + + infoStream.getStepname() + "] is found.", stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, "Previous step to read info from [" + + infoStream.getStepname() + "] is not found.", stepMeta ); + remarks.add( cr ); + } + + // Count the number of ? in the SQL string: + int count = 0; + for ( int i = 0; i < sql.length(); i++ ) { + char c = sql.charAt( i ); + if ( c == '\'' ) { // skip to next quote! + do { + i++; + c = sql.charAt( i ); + } while ( c != '\'' ); + } + if ( c == '?' ) { + count++; + } + } + // Verify with the number of informative fields... 
+ if ( info != null ) { + if ( count == info.size() ) { + cr = + new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "This step is expecting and receiving " + + info.size() + " fields of input from the previous step.", stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "This step is receiving " + + info.size() + " but not the expected " + count + + " fields of input from the previous step.", stepMeta ); + remarks.add( cr ); + } + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "Input step name is not recognized!", stepMeta ); + remarks.add( cr ); + } + } else { + if ( input.length > 0 ) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "Step is not expecting info from input steps.", stepMeta ); + remarks.add( cr ); + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_OK, "No input expected, no input provided.", stepMeta ); + remarks.add( cr ); + } + + } + } + + /** + * @param steps + * optionally search the info step in a list of steps + */ + public void searchInfoAndTargetSteps( List steps ) { + for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { + stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + } + } + + public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, + TransMeta transMeta, Trans trans ) { + return new TableInput( stepMeta, stepDataInterface, cnr, transMeta, trans ); + } + + public StepDataInterface getStepData() { + return new TableInputData(); + } + + @Override + public void analyseImpact( List impact, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, Repository repository, + IMetaStore metaStore ) throws KettleStepException { + + if ( stepMeta.getName().equalsIgnoreCase( "cdc_cust" ) ) { + System.out.println( "HERE!" ); + } + + // Find the lookupfields... + RowMetaInterface out = new RowMeta(); + // TODO: this builds, but does it work in all cases. + getFields( out, stepMeta.getName(), new RowMetaInterface[] { info }, null, transMeta, repository, metaStore ); + + if ( out != null ) { + for ( int i = 0; i < out.size(); i++ ) { + ValueMetaInterface outvalue = out.getValueMeta( i ); + DatabaseImpact ii = + new DatabaseImpact( + DatabaseImpact.TYPE_IMPACT_READ, transMeta.getName(), stepMeta.getName(), databaseMeta + .getDatabaseName(), "", outvalue.getName(), outvalue.getName(), stepMeta.getName(), sql, + "read from one or more database tables via SQL statement" ); + impact.add( ii ); + + } + } + } + + public DatabaseMeta[] getUsedDatabaseConnections() { + if ( databaseMeta != null ) { + return new DatabaseMeta[] { databaseMeta }; + } else { + return super.getUsedDatabaseConnections(); + } + } + + /** + * @return Returns the variableReplacementActive. + */ + public boolean isVariableReplacementActive() { + return variableReplacementActive; + } + + /** + * @param variableReplacementActive + * The variableReplacementActive to set. 
+ */ + public void setVariableReplacementActive( boolean variableReplacementActive ) { + this.variableReplacementActive = variableReplacementActive; + } + + /** + * @return the lazyConversionActive + */ + public boolean isLazyConversionActive() { + return lazyConversionActive; + } + + /** + * @param lazyConversionActive + * the lazyConversionActive to set + */ + public void setLazyConversionActive( boolean lazyConversionActive ) { + this.lazyConversionActive = lazyConversionActive; + } + + /** + * Returns the Input/Output metadata for this step. The generator step only produces output, does not accept input! + */ + public StepIOMetaInterface getStepIOMeta() { + if ( ioMeta == null ) { + + ioMeta = new StepIOMeta( true, true, false, false, false, false ); + + StreamInterface stream = + new Stream( + StreamType.INFO, null, BaseMessages.getString( PKG, "TableInputMeta.InfoStream.Description" ), + StreamIcon.INFO, null ); + ioMeta.addStream( stream ); + } + + return ioMeta; + } + + public void resetStepIoMeta() { + // Do nothing, don't reset as there is no need to do this. + } + + /** + * For compatibility, wraps around the standard step IO metadata + * + * @param stepMeta + * The step where you read lookup data from + */ + public void setLookupFromStep( StepMeta stepMeta ) { + getStepIOMeta().getInfoStreams().get( 0 ).setStepMeta( stepMeta ); + } + + /** + * For compatibility, wraps around the standard step IO metadata + * + * @return The step where you read lookup data from + */ + public StepMeta getLookupFromStep() { + return getStepIOMeta().getInfoStreams().get( 0 ).getStepMeta(); + } + + @Override + public TableInputMetaInjection getStepMetaInjectionInterface() { + return new TableInputMetaInjection( this ); + } + + public List extractStepMetadataEntries() throws KettleException { + return getStepMetaInjectionInterface().extractStepMetadataEntries(); + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java new file mode 100644 index 0000000..beb051a --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java @@ -0,0 +1,391 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.www; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Properties; + +import org.apache.commons.cli.BasicParser; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.OptionBuilder; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.vfs2.FileObject; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.KettleClientEnvironment; +import org.pentaho.di.core.KettleEnvironment; +import org.pentaho.di.core.logging.KettleLogStore; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.util.EnvUtil; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.w3c.dom.Document; +import org.w3c.dom.Node; + +import com.sun.jersey.api.client.Client; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.config.ClientConfig; +import com.sun.jersey.api.client.config.DefaultClientConfig; +import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; +import com.sun.jersey.api.json.JSONConfiguration; + +public class Carte { + private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! + + private WebServer webServer; + private SlaveServerConfig config; + private boolean allOK; + private static Options options; + + public Carte( final SlaveServerConfig config ) throws Exception { + this( config, null ); + } + + public Carte( final SlaveServerConfig config, Boolean joinOverride ) throws Exception { + this.config = config; + + allOK = true; + + CarteSingleton.setSlaveServerConfig( config ); + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + + final TransformationMap transformationMap = CarteSingleton.getInstance().getTransformationMap(); + transformationMap.setSlaveServerConfig( config ); + final JobMap jobMap = CarteSingleton.getInstance().getJobMap(); + jobMap.setSlaveServerConfig( config ); + List detections = Collections.synchronizedList( new ArrayList() ); + SocketRepository socketRepository = CarteSingleton.getInstance().getSocketRepository(); + + SlaveServer slaveServer = config.getSlaveServer(); + + String hostname = slaveServer.getHostname(); + int port = WebServer.PORT; + if ( !Const.isEmpty( slaveServer.getPort() ) ) { + try { + port = Integer.parseInt( slaveServer.getPort() ); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" + port ), + e ); + allOK = false; + } + } + + // TODO: see if we need to keep doing this on a periodic basis. + // The master might be dead or not alive yet at the time we send this message. + // Repeating the registration over and over every few minutes might harden this sort of problems. 
+ // + Properties masterProperties = null; + if ( config.isReportingToMasters() ) { + String propertiesMaster = slaveServer.getPropertiesMasterName(); + for ( final SlaveServer master : config.getMasters() ) { + // Here we use the username/password specified in the slave server section of the configuration. + // This doesn't have to be the same pair as the one used on the master! + // + try { + SlaveServerDetection slaveServerDetection = new SlaveServerDetection( slaveServer.getClient() ); + master.sendXML( slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/" ); + log.logBasic( "Registered this slave server to master slave server [" + master.toString() + "] on address [" + + master.getServerAndPort() + "]" ); + } catch ( Exception e ) { + log.logError( "Unable to register to master slave server [" + master.toString() + "] on address [" + master + .getServerAndPort() + "]" ); + allOK = false; + } + try { + if ( !StringUtils.isBlank( propertiesMaster ) && propertiesMaster.equalsIgnoreCase( master.getName() ) ) { + if ( masterProperties != null ) { + log.logError( "More than one primary master server. Master name is " + propertiesMaster ); + } else { + masterProperties = master.getKettleProperties(); + log.logBasic( "Got properties from master server [" + master.toString() + "], address [" + master + .getServerAndPort() + "]" ); + } + } + } catch ( Exception e ) { + log.logError( "Unable to get properties from master server [" + master.toString() + "], address [" + master + .getServerAndPort() + "]" ); + allOK = false; + } + } + } + if ( masterProperties != null ) { + EnvUtil.applyKettleProperties( masterProperties, slaveServer.isOverrideExistingProperties() ); + } + + // If we need to time out finished or idle objects, we should create a timer in the background to clean + // this is done automatically now + // CarteSingleton.installPurgeTimer(config, log, transformationMap, jobMap); + + if ( allOK ) { + boolean shouldJoin = config.isJoining(); + if ( joinOverride != null ) { + shouldJoin = joinOverride; + } + + this.webServer = + new WebServer( log, transformationMap, jobMap, socketRepository, detections, hostname, port, shouldJoin, + config.getPasswordFile(), slaveServer.getSslConfig() ); + } + } + + public static void main( String[] args ) { + try { + parseAndRunCommand( args ); + } catch ( Exception e ) { + e.printStackTrace(); + } + } + + @SuppressWarnings( "static-access" ) + private static void parseAndRunCommand( String[] args ) throws Exception { + options = new Options(); + options.addOption( OptionBuilder.withLongOpt( "stop" ).withDescription( BaseMessages.getString( PKG, + "Carte.ParamDescription.stop" ) ).hasArg( false ).isRequired( false ).create( 's' ) ); + options.addOption( OptionBuilder.withLongOpt( "userName" ).withDescription( BaseMessages.getString( PKG, + "Carte.ParamDescription.userName" ) ).hasArg( true ).isRequired( false ).create( 'u' ) ); + options.addOption( OptionBuilder.withLongOpt( "password" ).withDescription( BaseMessages.getString( PKG, + "Carte.ParamDescription.password" ) ).hasArg( true ).isRequired( false ).create( 'p' ) ); + options.addOption( OptionBuilder.withLongOpt( "help" ).withDescription( BaseMessages.getString( PKG, + "Carte.ParamDescription.help" ) ).create( 'h' ) ); + + CommandLineParser parser = new BasicParser(); + CommandLine cmd = parser.parse( options, args ); + + if ( cmd.hasOption( 'h' ) ) { + displayHelpAndAbort(); + } + + String[] arguments = cmd.getArgs(); + boolean usingConfigFile = false; + + // Load from an xml 
file that describes the complete configuration... + // + SlaveServerConfig config = null; + if ( arguments.length == 1 && !Const.isEmpty( arguments[0] ) ) { + if ( cmd.hasOption( 's' ) ) { + throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.illegalStop" ) ); + } + usingConfigFile = true; + FileObject file = KettleVFS.getFileObject( arguments[0] ); + Document document = XMLHandler.loadXMLFile( file ); + setKettleEnvironment(); // Must stand up server now to allow decryption of password + Node configNode = XMLHandler.getSubNode( document, SlaveServerConfig.XML_TAG ); + config = new SlaveServerConfig( new LogChannel( "Slave server config" ), configNode ); + if ( config.getAutoSequence() != null ) { + config.readAutoSequences(); + } + config.setFilename( arguments[0] ); + } + if ( arguments.length == 2 && !Const.isEmpty( arguments[0] ) && !Const.isEmpty( arguments[1] ) ) { + String hostname = arguments[0]; + String port = arguments[1]; + + if ( cmd.hasOption( 's' ) ) { + String user = cmd.getOptionValue( 'u' ); + String password = cmd.getOptionValue( 'p' ); + shutdown( hostname, port, user, password ); + System.exit( 0 ); + } + + SlaveServer slaveServer = new SlaveServer( hostname + ":" + port, hostname, port, null, null ); + + config = new SlaveServerConfig(); + config.setSlaveServer( slaveServer ); + } + + // Nothing configured: show the usage + // + if ( config == null ) { + displayHelpAndAbort(); + } + + if ( !usingConfigFile ) { + setKettleEnvironment(); + } + runCarte( config ); + } + + private static void setKettleEnvironment() throws Exception { + KettleClientEnvironment.getInstance().setClient( KettleClientEnvironment.ClientType.CARTE ); + KettleEnvironment.init(); + } + + public static void runCarte( SlaveServerConfig config ) throws Exception { + KettleLogStore.init( config.getMaxLogLines(), config.getMaxLogTimeoutMinutes() ); + + config.setJoining( true ); + + Carte carte = new Carte( config, false ); + CarteSingleton.setCarte( carte ); + + carte.getWebServer().join(); + } + + /** + * @return the webServer + */ + public WebServer getWebServer() { + return webServer; + } + + /** + * @param webServer + * the webServer to set + */ + public void setWebServer( WebServer webServer ) { + this.webServer = webServer; + } + + /** + * @return the slave server (Carte) configuration + */ + public SlaveServerConfig getConfig() { + return config; + } + + /** + * @param config + * the slave server (Carte) configuration + */ + public void setConfig( SlaveServerConfig config ) { + this.config = config; + } + + private static void displayHelpAndAbort() { + HelpFormatter formatter = new HelpFormatter(); + String optionsHelp = getOptionsHelpForUsage(); + String header = + BaseMessages.getString( PKG, "Carte.Usage.Text" ) + optionsHelp + "\nor\n" + BaseMessages.getString( PKG, + "Carte.Usage.Text2" ) + "\n\n" + BaseMessages.getString( PKG, "Carte.MainDescription" ); + + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter( stringWriter ); + formatter.printHelp( printWriter, 80, "CarteDummy", header, options, 5, 5, "", false ); + System.err.println( stripOff( stringWriter.toString(), "usage: CarteDummy" ) ); + + System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + ": Carte 127.0.0.1 8080" ); + System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + ": Carte 192.168.1.221 8081" ); + System.err.println(); + System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + ": Carte 
/foo/bar/carte-config.xml" ); + System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + + ": Carte http://www.example.com/carte-config.xml" ); + System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + + ": Carte 127.0.0.1 8080 -s -u cluster -p cluster" ); + + System.exit( 1 ); + } + + private static String getOptionsHelpForUsage() { + HelpFormatter formatter = new HelpFormatter(); + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter( stringWriter ); + formatter.printUsage( printWriter, 999, "", options ); + return stripOff( stringWriter.toString(), "usage: " ); // Strip off the "usage:" so it can be localized + } + + private static String stripOff( String target, String strip ) { + return target.substring( target.indexOf( strip ) + strip.length() ); + } + + private static void shutdown( String hostname, String port, String username, String password ) { + try { + callStopCarteRestService( hostname, port, username, password ); + } catch ( Exception e ) { + e.printStackTrace(); + } + } + + /** + * Checks that Carte is running and if so, shuts down the Carte server + * + * @param hostname + * @param port + * @param username + * @param password + * @throws ParseException + * @throws CarteCommandException + */ + private static void callStopCarteRestService( String hostname, String port, String username, String password ) + throws ParseException, CarteCommandException { + // get information about the remote connection + try { + ClientConfig clientConfig = new DefaultClientConfig(); + clientConfig.getFeatures().put( JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE ); + Client client = Client.create( clientConfig ); + client.addFilter( new HTTPBasicAuthFilter( username, password ) ); + + // check if the user can access the carte server. 
Don't really need this call but may want to check its output at
+ // some point
+ String contextURL = "http://" + hostname + ":" + port + "/kettle";
+ WebResource resource = client.resource( contextURL + "/status/?xml=Y" );
+ String response = resource.get( String.class );
+ if ( response == null || !response.contains( "<serverstatus>" ) ) {
+ throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.NoServerFound", hostname, ""
+ + port ) );
+ }
+
+ // This is the call that matters
+ resource = client.resource( contextURL + "/stopCarte" );
+ response = resource.get( String.class );
+ if ( response == null || !response.contains( "Shutting Down" ) ) {
+ throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.NoShutdown", hostname, ""
+ + port ) );
+ }
+ } catch ( Exception e ) {
+ throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.NoServerFound", hostname, ""
+ + port ), e );
+ }
+ }
+
+ /**
+ * Exception generated when command line fails
+ */
+ public static class CarteCommandException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public CarteCommandException() {
+ }
+
+ public CarteCommandException( final String message ) {
+ super( message );
+ }
+
+ public CarteCommandException( final String message, final Throwable cause ) {
+ super( message, cause );
+ }
+
+ public CarteCommandException( final Throwable cause ) {
+ super( cause );
+ }
+ }
+}
diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java
new file mode 100644
index 0000000..c932be2
--- /dev/null
+++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java
@@ -0,0 +1,297 @@
+/*! ******************************************************************************
+ *
+ * Pentaho Data Integration
+ *
+ * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
+ *
+ *******************************************************************************
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + ******************************************************************************/ + +package org.pentaho.di.www; + +import java.util.ArrayList; +import java.util.List; +import java.util.Timer; +import java.util.TimerTask; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.KettleEnvironment; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.logging.KettleLogStore; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.logging.LogLevel; +import org.pentaho.di.core.logging.LoggingObjectType; +import org.pentaho.di.core.logging.LoggingRegistry; +import org.pentaho.di.core.logging.SimpleLoggingObject; +import org.pentaho.di.core.util.EnvUtil; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.job.Job; +import org.pentaho.di.trans.Trans; +import org.pentaho.di.trans.TransConfiguration; +import org.pentaho.di.trans.TransExecutionConfiguration; + +public class CarteSingleton { + + private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! + + private static SlaveServerConfig slaveServerConfig; + private static CarteSingleton carteSingleton; + private static Carte carte; + + private LogChannelInterface log; + + private TransformationMap transformationMap; + private JobMap jobMap; + private List detections; + private SocketRepository socketRepository; + + private CarteSingleton( SlaveServerConfig config ) throws KettleException { + KettleEnvironment.init(); + KettleLogStore.init( config.getMaxLogLines(), config.getMaxLogTimeoutMinutes() ); + + this.log = new LogChannel( "Carte" ); + transformationMap = new TransformationMap(); + transformationMap.setSlaveServerConfig( config ); + jobMap = new JobMap(); + jobMap.setSlaveServerConfig( config ); + detections = new ArrayList(); + socketRepository = new SocketRepository( log ); + + installPurgeTimer( config, log, transformationMap, jobMap ); + + SlaveServer slaveServer = config.getSlaveServer(); + if ( slaveServer != null ) { + int port = WebServer.PORT; + if ( !Const.isEmpty( slaveServer.getPort() ) ) { + try { + port = Integer.parseInt( slaveServer.getPort() ); + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" + + port ), e ); + } + } + + // TODO: see if we need to keep doing this on a periodic basis. + // The master might be dead or not alive yet at the time we send this + // message. + // Repeating the registration over and over every few minutes might + // harden this sort of problems. + // + if ( config.isReportingToMasters() ) { + String hostname = slaveServer.getHostname(); + final SlaveServer client = + new SlaveServer( "Dynamic slave [" + hostname + ":" + port + "]", hostname, "" + port, slaveServer + .getUsername(), slaveServer.getPassword() ); + for ( final SlaveServer master : config.getMasters() ) { + // Here we use the username/password specified in the slave + // server section of the configuration. + // This doesn't have to be the same pair as the one used on the + // master! 
+ // + try { + SlaveServerDetection slaveServerDetection = new SlaveServerDetection( client ); + master.sendXML( slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/" ); + log.logBasic( "Registered this slave server to master slave server [" + + master.toString() + "] on address [" + master.getServerAndPort() + "]" ); + } catch ( Exception e ) { + log.logError( "Unable to register to master slave server [" + + master.toString() + "] on address [" + master.getServerAndPort() + "]" ); + } + } + } + } + } + + public static void installPurgeTimer( final SlaveServerConfig config, final LogChannelInterface log, + final TransformationMap transformationMap, final JobMap jobMap ) { + + final int objectTimeout; + String systemTimeout = EnvUtil.getSystemProperty( Const.KETTLE_CARTE_OBJECT_TIMEOUT_MINUTES, null ); + + // The value specified in XML takes precedence over the environment variable! + // + if ( config.getObjectTimeoutMinutes() > 0 ) { + objectTimeout = config.getObjectTimeoutMinutes(); + } else if ( !Const.isEmpty( systemTimeout ) ) { + objectTimeout = Const.toInt( systemTimeout, 1440 ); + } else { + objectTimeout = 24 * 60; // 1440 : default is a one day time-out + } + + // If we need to time out finished or idle objects, we should create a timer + // in the background to clean + // + if ( objectTimeout > 0 ) { + + log.logBasic( "Installing timer to purge stale objects after " + objectTimeout + " minutes." ); + + Timer timer = new Timer( true ); + + final AtomicBoolean busy = new AtomicBoolean( false ); + TimerTask timerTask = new TimerTask() { + public void run() { + if ( !busy.get() ) { + busy.set( true ); + + try { + // Check all transformations... + // + for ( CarteObjectEntry entry : transformationMap.getTransformationObjects() ) { + Trans trans = transformationMap.getTransformation( entry ); + + // See if the transformation is finished or stopped. + // + if ( trans != null && ( trans.isFinished() || trans.isStopped() ) && trans.getLogDate() != null ) { + // check the last log time + // + int diffInMinutes = + (int) Math.floor( ( System.currentTimeMillis() - trans.getLogDate().getTime() ) / 60000 ); + if ( diffInMinutes >= objectTimeout ) { + // Let's remove this from the transformation map... + // + transformationMap.removeTransformation( entry ); + + // Remove the logging information from the log registry & central log store + // + LoggingRegistry.getInstance().removeIncludingChildren( trans.getLogChannelId() ); + KettleLogStore.discardLines( trans.getLogChannelId(), false ); + + // transformationMap.deallocateServerSocketPorts(entry); + + log.logMinimal( "Cleaned up transformation " + + entry.getName() + " with id " + entry.getId() + " from " + trans.getLogDate() + + ", diff=" + diffInMinutes ); + } + } + } + + // And the jobs... + // + for ( CarteObjectEntry entry : jobMap.getJobObjects() ) { + Job job = jobMap.getJob( entry ); + + // See if the job is finished or stopped. + // + if ( job != null && ( job.isFinished() || job.isStopped() ) && job.getLogDate() != null ) { + // check the last log time + // + int diffInMinutes = + (int) Math.floor( ( System.currentTimeMillis() - job.getLogDate().getTime() ) / 60000 ); + if ( diffInMinutes >= objectTimeout ) { + // Let's remove this from the job map... 
+ // + jobMap.removeJob( entry ); + log.logMinimal( "Cleaned up job " + + entry.getName() + " with id " + entry.getId() + " from " + job.getLogDate() ); + } + } + } + + } finally { + busy.set( false ); + } + } + } + }; + + // Search for stale objects every 20 seconds: + // + timer.schedule( timerTask, 20000, 20000 ); + } + } + + public static CarteSingleton getInstance() { + try { + if ( carteSingleton == null ) { + if ( slaveServerConfig == null ) { + slaveServerConfig = new SlaveServerConfig(); + SlaveServer slaveServer = new SlaveServer(); + slaveServerConfig.setSlaveServer( slaveServer ); + } + + carteSingleton = new CarteSingleton( slaveServerConfig ); + + String carteObjectId = UUID.randomUUID().toString(); + SimpleLoggingObject servletLoggingObject = + new SimpleLoggingObject( "CarteSingleton", LoggingObjectType.CARTE, null ); + servletLoggingObject.setContainerObjectId( carteObjectId ); + servletLoggingObject.setLogLevel( LogLevel.BASIC ); + + return carteSingleton; + } else { + return carteSingleton; + } + } catch ( KettleException ke ) { + throw new RuntimeException( ke ); + } + } + + public TransformationMap getTransformationMap() { + return transformationMap; + } + + public void setTransformationMap( TransformationMap transformationMap ) { + this.transformationMap = transformationMap; + } + + public JobMap getJobMap() { + return jobMap; + } + + public void setJobMap( JobMap jobMap ) { + this.jobMap = jobMap; + } + + public List getDetections() { + return detections; + } + + public void setDetections( List detections ) { + this.detections = detections; + } + + public SocketRepository getSocketRepository() { + return socketRepository; + } + + public void setSocketRepository( SocketRepository socketRepository ) { + this.socketRepository = socketRepository; + } + + public static SlaveServerConfig getSlaveServerConfig() { + return slaveServerConfig; + } + + public static void setSlaveServerConfig( SlaveServerConfig slaveServerConfig ) { + CarteSingleton.slaveServerConfig = slaveServerConfig; + } + + public static void setCarte( Carte carte ) { + CarteSingleton.carte = carte; + } + + public static Carte getCarte() { + return CarteSingleton.carte; + } + + public LogChannelInterface getLog() { + return log; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java new file mode 100644 index 0000000..857de35 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java @@ -0,0 +1,521 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.www; + +import com.sun.jersey.spi.container.servlet.ServletContainer; +import org.eclipse.jetty.plus.jaas.JAASLoginService; +import org.eclipse.jetty.security.ConstraintMapping; +import org.eclipse.jetty.security.ConstraintSecurityHandler; +import org.eclipse.jetty.security.HashLoginService; +import org.eclipse.jetty.server.Connector; +import org.eclipse.jetty.server.Handler; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.UserIdentity; +import org.eclipse.jetty.server.bio.SocketConnector; +import org.eclipse.jetty.server.handler.ContextHandlerCollection; +import org.eclipse.jetty.server.handler.HandlerList; +import org.eclipse.jetty.server.handler.ResourceHandler; +import org.eclipse.jetty.server.ssl.SslSocketConnector; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.util.security.Constraint; +import org.eclipse.jetty.util.security.Credential; +import org.eclipse.jetty.util.security.Password; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.KettleEnvironment; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.extension.ExtensionPointHandler; +import org.pentaho.di.core.extension.KettleExtensionPoint; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.plugins.CartePluginType; +import org.pentaho.di.core.plugins.PluginInterface; +import org.pentaho.di.core.plugins.PluginRegistry; +import org.pentaho.di.i18n.BaseMessages; + +import javax.servlet.Servlet; +import java.io.File; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Timer; +import java.util.TimerTask; + +public class WebServer { + private static Class PKG = WebServer.class; // for i18n purposes, needed by Translator2!! 
+ + private LogChannelInterface log; + + public static final int PORT = 80; + + private Server server; + + private TransformationMap transformationMap; + private JobMap jobMap; + private List detections; + private SocketRepository socketRepository; + + private String hostname; + private int port; + + private Timer slaveMonitoringTimer; + + private String passwordFile; + private WebServerShutdownHook webServerShutdownHook; + private IWebServerShutdownHandler webServerShutdownHandler = new DefaultWebServerShutdownHandler(); + + private SslConfiguration sslConfig; + + public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List detections, String hostname, int port, boolean join, + String passwordFile ) throws Exception { + this( log, transformationMap, jobMap, socketRepository, detections, hostname, port, join, passwordFile, null ); + } + + public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List detections, String hostname, int port, boolean join, + String passwordFile, SslConfiguration sslConfig ) throws Exception { + this.log = log; + this.transformationMap = transformationMap; + this.jobMap = jobMap; + this.socketRepository = socketRepository; + this.detections = detections; + this.hostname = hostname; + this.port = port; + this.passwordFile = passwordFile; + this.sslConfig = sslConfig; + + startServer(); + + // Start the monitoring of the registered slave servers... + // + startSlaveMonitoring(); + + webServerShutdownHook = new WebServerShutdownHook( this ); + Runtime.getRuntime().addShutdownHook( webServerShutdownHook ); + + try { + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.CarteStartup.id, this ); + } catch ( KettleException e ) { + // Log error but continue regular operations to make sure Carte continues to run properly + // + log.logError( "Error calling extension point CarteStartup", e ); + } + + if ( join ) { + server.join(); + } + } + + public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List slaveServers, String hostname, int port ) + throws Exception { + this( log, transformationMap, jobMap, socketRepository, slaveServers, hostname, port, true ); + } + + public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List detections, String hostname, int port, + boolean join ) throws Exception { + this( log, transformationMap, jobMap, socketRepository, detections, hostname, port, join, null, null ); + } + + public Server getServer() { + return server; + } + + public void startServer() throws Exception { + server = new Server(); + + List roles = new ArrayList(); + roles.add( Constraint.ANY_ROLE ); + + // Set up the security handler, optionally with JAAS + // + ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler(); + + if ( System.getProperty( "loginmodulename" ) != null + && System.getProperty( "java.security.auth.login.config" ) != null ) { + JAASLoginService jaasLoginService = new JAASLoginService( "Kettle" ); + jaasLoginService.setLoginModuleName( System.getProperty( "loginmodulename" ) ); + securityHandler.setLoginService( jaasLoginService ); + } else { + roles.add( "default" ); + HashLoginService hashLoginService; + SlaveServer slaveServer = transformationMap.getSlaveServerConfig().getSlaveServer(); + if ( !Const.isEmpty( 
slaveServer.getPassword() ) ) { + hashLoginService = new HashLoginService( "Kettle" ); + hashLoginService.putUser( slaveServer.getUsername(), new Password( slaveServer.getPassword() ), + new String[] { "default" } ); + } else { + // See if there is a kettle.pwd file in the KETTLE_HOME directory: + if ( Const.isEmpty( passwordFile ) ) { + File homePwdFile = new File( Const.getKettleCartePasswordFile() ); + if ( homePwdFile.exists() ) { + passwordFile = Const.getKettleCartePasswordFile(); + } else { + passwordFile = Const.getKettleLocalCartePasswordFile(); + } + } + hashLoginService = new HashLoginService( "Kettle", passwordFile ) { + @Override public synchronized UserIdentity putUser( String userName, Credential credential, String[] roles ) { + List newRoles = new ArrayList(); + newRoles.add( "default" ); + Collections.addAll( newRoles, roles ); + return super.putUser( userName, credential, newRoles.toArray( new String[newRoles.size()] ) ); + } + }; + } + securityHandler.setLoginService( hashLoginService ); + } + + Constraint constraint = new Constraint(); + constraint.setName( Constraint.__BASIC_AUTH ); + constraint.setRoles( roles.toArray( new String[roles.size()] ) ); + constraint.setAuthenticate( true ); + + ConstraintMapping constraintMapping = new ConstraintMapping(); + constraintMapping.setConstraint( constraint ); + constraintMapping.setPathSpec( "/*" ); + + securityHandler.setConstraintMappings( new ConstraintMapping[] { constraintMapping } ); + + // Add all the servlets defined in kettle-servlets.xml ... + // + ContextHandlerCollection contexts = new ContextHandlerCollection(); + + // Root + // + ServletContextHandler + root = + new ServletContextHandler( contexts, GetRootServlet.CONTEXT_PATH, ServletContextHandler.SESSIONS ); + GetRootServlet rootServlet = new GetRootServlet(); + rootServlet.setJettyMode( true ); + root.addServlet( new ServletHolder( rootServlet ), "/*" ); + + PluginRegistry pluginRegistry = PluginRegistry.getInstance(); + List plugins = pluginRegistry.getPlugins( CartePluginType.class ); + for ( PluginInterface plugin : plugins ) { + + CartePluginInterface servlet = pluginRegistry.loadClass( plugin, CartePluginInterface.class ); + servlet.setup( transformationMap, jobMap, socketRepository, detections ); + servlet.setJettyMode( true ); + + ServletContextHandler servletContext = + new ServletContextHandler( contexts, getContextPath( servlet ), ServletContextHandler.SESSIONS ); + ServletHolder servletHolder = new ServletHolder( (Servlet) servlet ); + servletContext.addServlet( servletHolder, "/*" ); + } + + // setup jersey (REST) + ServletHolder jerseyServletHolder = new ServletHolder( ServletContainer.class ); + jerseyServletHolder.setInitParameter( "com.sun.jersey.config.property.resourceConfigClass", + "com.sun.jersey.api.core.PackagesResourceConfig" ); + jerseyServletHolder.setInitParameter( "com.sun.jersey.config.property.packages", "org.pentaho.di.www.jaxrs" ); + root.addServlet( jerseyServletHolder, "/api/*" ); + + // setup static resource serving + // ResourceHandler mobileResourceHandler = new ResourceHandler(); + // mobileResourceHandler.setWelcomeFiles(new String[]{"index.html"}); + // mobileResourceHandler.setResourceBase(getClass().getClassLoader(). + // getResource("org/pentaho/di/www/mobile").toExternalForm()); + // Context mobileContext = new Context(contexts, "/mobile", Context.SESSIONS); + // mobileContext.setHandler(mobileResourceHandler); + + // Allow png files to be shown for transformations and jobs... 
+ // + ResourceHandler resourceHandler = new ResourceHandler(); + resourceHandler.setResourceBase( "temp" ); + // add all handlers/contexts to server + + HandlerList handlers = new HandlerList(); + handlers.setHandlers( new Handler[] { contexts, resourceHandler } ); + securityHandler.setHandler( handlers ); + + server.setHandler( securityHandler ); + + // Start execution + createListeners(); + + server.start(); + } + + public String getContextPath( CartePluginInterface servlet ) { + String contextPath = servlet.getContextPath(); + if ( !contextPath.startsWith( "/kettle" ) ) { + contextPath = "/kettle" + contextPath; + } + return contextPath; + } + + public void join() throws InterruptedException { + server.join(); + } + + public void stopServer() { + + webServerShutdownHook.setShuttingDown( true ); + + try { + ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.CarteShutdown.id, this ); + } catch ( KettleException e ) { + // Log error but continue regular operations to make sure Carte can be shut down properly. + // + log.logError( "Error calling extension point CarteStartup", e ); + } + + try { + if ( server != null ) { + + // Stop the monitoring timer + // + if ( slaveMonitoringTimer != null ) { + slaveMonitoringTimer.cancel(); + slaveMonitoringTimer = null; + } + + // Clean up all the server sockets... + // + socketRepository.closeAll(); + + // Stop the server... + // + server.stop(); + KettleEnvironment.shutdown(); + if ( webServerShutdownHandler != null ) { + webServerShutdownHandler.shutdownWebServer(); + } + } + } catch ( Exception e ) { + log.logError( BaseMessages.getString( PKG, "WebServer.Error.FailedToStop.Title" ), + BaseMessages.getString( PKG, "WebServer.Error.FailedToStop.Msg", "" + e ) ); + } + } + + private void createListeners() { + + SocketConnector connector = getConnector(); + setupJettyOptions( connector ); + connector.setPort( port ); + connector.setHost( hostname ); + connector.setName( BaseMessages.getString( PKG, "WebServer.Log.KettleHTTPListener", hostname ) ); + log.logBasic( BaseMessages.getString( PKG, "WebServer.Log.CreateListener", hostname, "" + port ) ); + + server.setConnectors( new Connector[] { connector } ); + } + + private SocketConnector getConnector() { + if ( sslConfig != null ) { + log.logBasic( BaseMessages.getString( PKG, "WebServer.Log.SslModeUsing" ) ); + SslSocketConnector connector = new SslSocketConnector(); + connector.setKeystore( sslConfig.getKeyStore() ); + connector.setPassword( sslConfig.getKeyStorePassword() ); + connector.setKeyPassword( sslConfig.getKeyPassword() ); + connector.setKeystoreType( sslConfig.getKeyStoreType() ); + return connector; + } else { + return new SocketConnector(); + } + + } + + /** + * Set up jetty options to the connector + * + * @param connector + */ + protected void setupJettyOptions( SocketConnector connector ) { + if ( validProperty( Const.KETTLE_CARTE_JETTY_ACCEPTORS ) ) { + connector.setAcceptors( Integer.parseInt( System.getProperty( Const.KETTLE_CARTE_JETTY_ACCEPTORS ) ) ); + log.logBasic( + BaseMessages.getString( PKG, "WebServer.Log.ConfigOptions", "acceptors", connector.getAcceptors() ) ); + } + + if ( validProperty( Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE ) ) { + connector + .setAcceptQueueSize( Integer.parseInt( System.getProperty( Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE ) ) ); + log.logBasic( BaseMessages + .getString( PKG, "WebServer.Log.ConfigOptions", "acceptQueueSize", connector.getAcceptQueueSize() ) ); + } + + if ( validProperty( 
Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME ) ) {
+ connector.setLowResourceMaxIdleTime(
+ Integer.parseInt( System.getProperty( Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME ) ) );
+ log.logBasic( BaseMessages.getString( PKG, "WebServer.Log.ConfigOptions", "lowResourcesMaxIdleTime",
+ connector.getLowResourceMaxIdleTime() ) );
+ }
+
+ }
+
+ /**
+ * Checks whether the property is a non-null, non-empty String that can be parsed as an int, and returns true if it
+ * is, otherwise false
+ *
+ * @param property the property to check
+ * @return true if the property is a non-null, non-empty String that can be parsed as an int, false otherwise
+ */
+ private boolean validProperty( String property ) {
+ boolean isValid = false;
+ if ( System.getProperty( property ) != null && System.getProperty( property ).length() > 0 ) {
+ try {
+ Integer.parseInt( System.getProperty( property ) );
+ isValid = true;
+ } catch ( NumberFormatException nmbfExc ) {
+ log.logBasic( BaseMessages
+ .getString( PKG, "WebServer.Log.ConfigOptionsInvalid", property, System.getProperty( property ) ) );
+ }
+ }
+ return isValid;
+ }
+
+ /**
+ * @return the hostname
+ */
+ public String getHostname() {
+ return hostname;
+ }
+
+ /**
+ * @param hostname the hostname to set
+ */
+ public void setHostname( String hostname ) {
+ this.hostname = hostname;
+ }
+
+ /**
+ * @return the slave server detections
+ */
+ public List<SlaveServerDetection> getDetections() {
+ return detections;
+ }
+
+ /**
+ * This method registers a timer to check up on all the registered slave servers every X seconds.
+ */ + private void startSlaveMonitoring() { + slaveMonitoringTimer = new Timer( "WebServer Timer" ); + TimerTask timerTask = new TimerTask() { + + public void run() { + for ( SlaveServerDetection slaveServerDetection : detections ) { + SlaveServer slaveServer = slaveServerDetection.getSlaveServer(); + + // See if we can get a status... + // + try { + // TODO: consider making this lighter or retaining more information... + slaveServer.getStatus(); // throws the exception + slaveServerDetection.setActive( true ); + slaveServerDetection.setLastActiveDate( new Date() ); + } catch ( Exception e ) { + slaveServerDetection.setActive( false ); + slaveServerDetection.setLastInactiveDate( new Date() ); + + // TODO: kick it out after a configurable period of time... + } + } + } + }; + slaveMonitoringTimer.schedule( timerTask, 20000, 20000 ); + } + + /** + * @return the socketRepository + */ + public SocketRepository getSocketRepository() { + return socketRepository; + } + + /** + * @param socketRepository the socketRepository to set + */ + public void setSocketRepository( SocketRepository socketRepository ) { + this.socketRepository = socketRepository; + } + + public String getPasswordFile() { + return passwordFile; + } + + public void setPasswordFile( String passwordFile ) { + this.passwordFile = passwordFile; + } + + public LogChannelInterface getLog() { + return log; + } + + public void setLog( LogChannelInterface log ) { + this.log = log; + } + + public TransformationMap getTransformationMap() { + return transformationMap; + } + + public void setTransformationMap( TransformationMap transformationMap ) { + this.transformationMap = transformationMap; + } + + public JobMap getJobMap() { + return jobMap; + } + + public void setJobMap( JobMap jobMap ) { + this.jobMap = jobMap; + } + + public int getPort() { + return port; + } + + public void setPort( int port ) { + this.port = port; + } + + public Timer getSlaveMonitoringTimer() { + return slaveMonitoringTimer; + } + + public void setSlaveMonitoringTimer( Timer slaveMonitoringTimer ) { + this.slaveMonitoringTimer = slaveMonitoringTimer; + } + + public void setServer( Server server ) { + this.server = server; + } + + public void setDetections( List detections ) { + this.detections = detections; + } + + /** + * Can be used to override the default shutdown behavior of performing a System.exit + * + * @param webServerShutdownHandler + */ + public void setWebServerShutdownHandler( IWebServerShutdownHandler webServerShutdownHandler ) { + this.webServerShutdownHandler = webServerShutdownHandler; + } + +} diff --git a/pentaho-kettle/src/main/resources/kettle-servlets.xml b/pentaho-kettle/src/main/resources/kettle-servlets.xml new file mode 100644 index 0000000..bce1b63 --- /dev/null +++ b/pentaho-kettle/src/main/resources/kettle-servlets.xml @@ -0,0 +1,63 @@ + + + + + + + + + + Get the status of the server org.pentaho.di.www.GetStatusServlet + The the status of a transformation org.pentaho.di.www.GetTransStatusServlet + Prepare the execution of a transformation org.pentaho.di.www.PrepareExecutionTransServlet + Start the execution of a transformation org.pentaho.di.www.StartExecutionTransServlet + Prepare and start the execution of a transformation org.pentaho.di.www.StartTransServlet + Pause or continue a transformation org.pentaho.di.www.PauseTransServlet + Stop a transformation org.pentaho.di.www.StopTransServlet + Cleanup a transformation: close remote sockets, ... 
org.pentaho.di.www.CleanupTransServlet + Add a transformation for execution org.pentaho.di.www.AddTransServlet + Remove a transformation org.pentaho.di.www.RemoveTransServlet + Service for the allocation of server sockets org.pentaho.di.www.AllocateServerSocketServlet + Lists server socket allocation information org.pentaho.di.www.ListServerSocketServlet + Sniff test a transformation step org.pentaho.di.www.SniffStepServlet + Execute (prepare and start) a specific transformation and pass output to the servlet org.pentaho.di.www.ExecuteTransServlet + Generate a PNG image of a transformation org.pentaho.di.www.GetTransImageServlet + Run a transformation directly from a repository org.pentaho.di.www.RunTransServlet + + + + Start a job org.pentaho.di.www.StartJobServlet + Stop a job org.pentaho.di.www.StopJobServlet + Get the status of a job org.pentaho.di.www.GetJobStatusServlet + Add a job to the server org.pentaho.di.www.AddJobServlet + Remove a job from the server org.pentaho.di.www.RemoveJobServlet + Run a job directly from a repository org.pentaho.di.www.RunJobServlet + Execute (prepare and start) a specific job org.pentaho.di.www.ExecuteJobServlet + Generate a PNG image of a job org.pentaho.di.www.GetJobImageServlet + + Add a job to the server org.pentaho.di.www.RegisterJobServlet + Add a transformation to the server org.pentaho.di.www.RegisterTransServlet + Upload a resources export file org.pentaho.di.www.RegisterPackageServlet + + + + + Register a slave server org.pentaho.di.www.RegisterSlaveServlet + List all registered slave servers org.pentaho.di.www.GetSlavesServlet + Stop Carte Server org.pentaho.di.www.StopCarteServlet + Get properties from kettle.properties org.pentaho.di.www.GetPropertiesServlet + + + + Upload a resources export file org.pentaho.di.www.AddExportServlet + + + + Get the next block of values for a sequence org.pentaho.di.www.NextSequenceValueServlet + + diff --git a/pentaho-platform/pom.xml b/pentaho-platform/pom.xml new file mode 100644 index 0000000..9c16b59 --- /dev/null +++ b/pentaho-platform/pom.xml @@ -0,0 +1,62 @@ + + 4.0.0 + + com.github.zhicwu + pdi-cluster + 6.1.0.1-SNAPSHOT + + pentaho-platform + jar + Pentaho BA Server CE + Workarounds for Pentaho BA Server Community Edition. + https://github.com/zhicwu/pdi-cluster + + + ${project.parent.basedir} + 1.7.2 + + + + + pentaho + pentaho-platform-api + ${pentaho-ce.version} + + + pentaho + pentaho-platform-repository + ${pentaho-ce.version} + + + pentaho + pentaho-platform-scheduler + ${pentaho-ce.version} + + + org.quartz-scheduler + quartz + ${quartz.version} + + + + + Apache 2 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + Apache License Version 2.0 + + + + + scm:git:git@github.com:zhicwu/pdi-cluster.git + scm:git:git@github.com:zhicwu/pdi-cluster.git + https://github.com/zhicwu/pdi-cluster + ${project.version} + + + + + Zhichun Wu + + + \ No newline at end of file diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java new file mode 100644 index 0000000..12a781f --- /dev/null +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java @@ -0,0 +1,451 @@ +/*! + * This program is free software; you can redistribute it and/or modify it under the + * terms of the GNU Lesser General Public License, version 2.1 as published by the Free Software + * Foundation. 
+ * + * You should have received a copy of the GNU Lesser General Public License along with this + * program; if not, you can obtain a copy at http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html + * or from the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU Lesser General Public License for more details. + * + * Copyright (c) 2002-2013 Pentaho Corporation.. All rights reserved. + */ + +package org.pentaho.platform.scheduler2.quartz; + +import java.io.OutputStream; +import java.io.Serializable; +import java.text.MessageFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.Callable; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.pentaho.platform.api.action.IAction; +import org.pentaho.platform.api.action.IPostProcessingAction; +import org.pentaho.platform.api.action.IStreamingAction; +import org.pentaho.platform.api.action.IVarArgsAction; +import org.pentaho.platform.api.engine.IPluginManager; +import org.pentaho.platform.api.engine.PluginBeanException; +import org.pentaho.platform.api.repository.IContentItem; +import org.pentaho.platform.api.repository2.unified.ISourcesStreamEvents; +import org.pentaho.platform.api.repository2.unified.IStreamListener; +import org.pentaho.platform.api.repository2.unified.IUnifiedRepository; +import org.pentaho.platform.api.repository2.unified.RepositoryFile; +import org.pentaho.platform.api.repository2.unified.data.simple.SimpleRepositoryFileData; +import org.pentaho.platform.api.scheduler2.IBackgroundExecutionStreamProvider; +import org.pentaho.platform.api.scheduler2.IBlockoutManager; +import org.pentaho.platform.api.scheduler2.IJobTrigger; +import org.pentaho.platform.api.scheduler2.IScheduler; +import org.pentaho.platform.api.scheduler2.SimpleJobTrigger; +import org.pentaho.platform.engine.core.system.PentahoSystem; +import org.pentaho.platform.engine.security.SecurityHelper; +import org.pentaho.platform.engine.services.solution.ActionSequenceCompatibilityFormatter; +import org.pentaho.platform.scheduler2.blockout.BlockoutAction; +import org.pentaho.platform.scheduler2.email.Emailer; +import org.pentaho.platform.scheduler2.messsages.Messages; +import org.pentaho.platform.util.beans.ActionHarness; +import org.pentaho.platform.util.messages.LocaleHelper; +import org.pentaho.platform.util.web.MimeHelper; +import org.quartz.Job; +import org.quartz.JobDataMap; +import org.quartz.JobExecutionContext; +import org.quartz.JobExecutionException; + +/** + * A Quartz job that is responsible for executing the {@link IAction} referred to in the job context. 
+ * + * @author aphillips + */ +public class ActionAdapterQuartzJob implements Job { + + static final Log log = LogFactory.getLog( ActionAdapterQuartzJob.class ); + private static final long RETRY_COUNT = 6; + private static final long RETRY_SLEEP_AMOUNT = 10000; + + private String outputFilePath = null; + private Object lock = new Object(); + + protected Class resolveClass( JobDataMap jobDataMap ) throws PluginBeanException, JobExecutionException { + String actionClass = jobDataMap.getString( QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS ); + String actionId = jobDataMap.getString( QuartzScheduler.RESERVEDMAPKEY_ACTIONID ); + + Class clazz = null; + + if ( StringUtils.isEmpty( actionId ) && StringUtils.isEmpty( actionClass ) ) { + throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0001_REQUIRED_PARAM_MISSING", //$NON-NLS-1$ + QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS, QuartzScheduler.RESERVEDMAPKEY_ACTIONID ) ); + } + + for ( int i = 0; i < RETRY_COUNT; i++ ) { + try { + if ( !StringUtils.isEmpty( actionId ) ) { + IPluginManager pluginManager = PentahoSystem.get( IPluginManager.class ); + clazz = pluginManager.loadClass( actionId ); + return clazz; + } else if ( !StringUtils.isEmpty( actionClass ) ) { + clazz = Class.forName( actionClass ); + return clazz; + } + } catch ( Throwable t ) { + try { + Thread.sleep( RETRY_SLEEP_AMOUNT ); + } catch ( InterruptedException ie ) { + log.info( ie.getMessage(), ie ); + } + } + } + + // we have failed to locate the class for the actionClass + // and we're giving up waiting for it to become available/registered + // which can typically happen at system startup + throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0002_FAILED_TO_CREATE_ACTION", //$NON-NLS-1$ + StringUtils.isEmpty( actionId ) ? actionClass : actionId ) ); + } + + @SuppressWarnings( "unchecked" ) + public void execute( JobExecutionContext context ) throws JobExecutionException { + JobDataMap jobDataMap = context.getMergedJobDataMap(); + String actionUser = jobDataMap.getString( QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER ); + + Object bean; + Class actionClass = null; + try { + actionClass = resolveClass( jobDataMap ); + bean = actionClass.newInstance(); + } catch ( Exception e ) { + throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0002_FAILED_TO_CREATE_ACTION", //$NON-NLS-1$ + ( actionClass == null ) ? "unknown" : actionClass.getName() ), e ); //$NON-NLS-1$ + } + + if ( !( bean instanceof IAction ) ) { + throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0003_ACTION_WRONG_TYPE", actionClass.getName(), //$NON-NLS-1$ + IAction.class.getName() ) ); + } + + final IAction actionBean = (IAction) bean; + + try { + invokeAction( actionBean, actionUser, context, jobDataMap.getWrappedMap() ); + + } catch ( Throwable t ) { + // ensure that scheduler thread isn't blocked on lock + synchronized ( lock ) { + lock.notifyAll(); + } + + // We should not distinguish between checked and unchecked exceptions here. 
All job execution failures + // should result in a rethrow of a quartz exception + throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0004_ACTION_FAILED", actionBean //$NON-NLS-1$ + .getClass().getName() ), t ); + } + } + + protected void invokeAction( final IAction actionBean, final String actionUser, final JobExecutionContext context, + final Map params ) throws Exception { + + final IScheduler scheduler = PentahoSystem.getObjectFactory().get( IScheduler.class, "IScheduler2", null ); + final Map jobParams = new HashMap( params ); // shallow copy + + // remove the scheduling infrastructure properties + params.remove( QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS ); + params.remove( QuartzScheduler.RESERVEDMAPKEY_ACTIONID ); + params.remove( QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER ); + Object objsp = params.get( QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER ); + IBackgroundExecutionStreamProvider sp = null; + if ( objsp != null && IBackgroundExecutionStreamProvider.class.isAssignableFrom( objsp.getClass() ) ) { + sp = (IBackgroundExecutionStreamProvider) objsp; + } + final IBackgroundExecutionStreamProvider streamProvider = sp; + params.remove( QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER ); + params.remove( QuartzScheduler.RESERVEDMAPKEY_UIPASSPARAM ); + // The scheduled_fire_time is useful only to the blockoutAction see PDI-10171 + if ( actionBean instanceof BlockoutAction ) { + params.put( IBlockoutManager.SCHEDULED_FIRE_TIME, context.getScheduledFireTime() ); + } + + if ( log.isDebugEnabled() ) { + log.debug( MessageFormat.format( + "Scheduling system invoking action {0} as user {1} with params [ {2} ]", actionBean //$NON-NLS-1$ + .getClass().getName(), actionUser, QuartzScheduler.prettyPrintMap( params ) ) ); + } + + Callable actionBeanRunner = new Callable() { + + public Boolean call() throws Exception { + LocaleHelper.setLocaleOverride( (Locale) params.get( LocaleHelper.USER_LOCALE_PARAM ) ); + // sync job params to the action bean + ActionHarness actionHarness = new ActionHarness( actionBean ); + boolean updateJob = false; + + final Map actionParams = new HashMap(); + actionParams.putAll( params ); + if ( streamProvider != null ) { + actionParams.put( "inputStream", streamProvider.getInputStream() ); + } + actionHarness.setValues( actionParams, new ActionSequenceCompatibilityFormatter() ); + + if ( actionBean instanceof IVarArgsAction ) { + actionParams.remove( "inputStream" ); + actionParams.remove( "outputStream" ); + ( (IVarArgsAction) actionBean ).setVarArgs( actionParams ); + } + + boolean waitForFileCreated = false; + OutputStream stream = null; + + if ( streamProvider != null ) { + actionParams.remove( "inputStream" ); + if ( actionBean instanceof IStreamingAction ) { + streamProvider.setStreamingAction( (IStreamingAction) actionBean ); + } + + // BISERVER-9414 - validate that output path still exist + SchedulerOutputPathResolver resolver = + new SchedulerOutputPathResolver( streamProvider.getOutputPath(), actionUser ); + String outputPath = resolver.resolveOutputFilePath(); + actionParams.put( "useJcr", Boolean.TRUE ); + actionParams.put( "jcrOutputPath", outputPath.substring( 0, outputPath.lastIndexOf( "/" ) ) ); + + if ( !outputPath.equals( streamProvider.getOutputPath() ) ) { + streamProvider.setOutputFilePath( outputPath ); // set fallback path + updateJob = true; // job needs to be deleted and recreated with the new output path + } + + stream = streamProvider.getOutputStream(); + if ( stream instanceof 
ISourcesStreamEvents ) { + ( (ISourcesStreamEvents) stream ).addListener( new IStreamListener() { + public void fileCreated( final String filePath ) { + synchronized ( lock ) { + outputFilePath = filePath; + lock.notifyAll(); + } + } + } ); + waitForFileCreated = true; + } + actionParams.put( "outputStream", stream ); + // The lineage_id is only useful for the metadata and not needed at this level see PDI-10171 + actionParams.remove( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID ); + actionHarness.setValues( actionParams ); + } + + actionBean.execute(); + + if ( stream != null ) { + IOUtils.closeQuietly( stream ); + } + + if ( waitForFileCreated ) { + synchronized ( lock ) { + if ( outputFilePath == null ) { + lock.wait(); + } + } + sendEmail( actionParams, params, outputFilePath ); + } + if ( actionBean instanceof IPostProcessingAction ) { + markContentAsGenerated( (IPostProcessingAction) actionBean ); + } + return updateJob; + } + + private void markContentAsGenerated( IPostProcessingAction actionBean ) { + IUnifiedRepository repo = PentahoSystem.get( IUnifiedRepository.class ); + String lineageId = (String) params.get( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID ); + for ( IContentItem contentItem : actionBean.getActionOutputContents() ) { + RepositoryFile sourceFile = repo.getFile( contentItem.getPath() ); + // add metadata iof we have access and we have file + if ( sourceFile != null ) { + Map metadata = repo.getFileMetadata( sourceFile.getId() ); + metadata.put( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID, lineageId ); + repo.setFileMetadata( sourceFile.getId(), metadata ); + } + } + } + }; + + boolean requiresUpdate = false; + if ( ( actionUser == null ) || ( actionUser.equals( "system session" ) ) ) { //$NON-NLS-1$ + // For now, don't try to run quartz jobs as authenticated if the user + // that created the job is a system user. See PPP-2350 + requiresUpdate = SecurityHelper.getInstance().runAsAnonymous( actionBeanRunner ); + } else { + try { + requiresUpdate = SecurityHelper.getInstance().runAsUser( actionUser, actionBeanRunner ); + } catch ( Throwable t ) { + Object restartFlag = jobParams.get( QuartzScheduler.RESERVEDMAPKEY_RESTART_FLAG ); + if ( restartFlag == null ) { + final SimpleJobTrigger trigger = new SimpleJobTrigger( new Date(), null, 0, 0 ); + final Class iaction = (Class) actionBean.getClass(); + // recreate the job in the context of the original creator + SecurityHelper.getInstance().runAsUser( actionUser, new Callable() { + @Override + public Void call() throws Exception { + if ( streamProvider != null ) { + streamProvider.setStreamingAction( null ); // remove generated content + } + QuartzJobKey jobKey = QuartzJobKey.parse( context.getJobDetail().getName() ); + String jobName = jobKey.getJobName(); + jobParams.put( QuartzScheduler.RESERVEDMAPKEY_RESTART_FLAG, Boolean.TRUE ); + scheduler.createJob( jobName, iaction, jobParams, trigger, streamProvider ); + log.warn( "New RunOnce job created for " + jobName + " -> possible startup synchronization error" ); + return null; + } + } ); + } else { + log.warn( "RunOnce already created, skipping" ); + throw new Exception( t ); + } + } + } + + scheduler.fireJobCompleted( actionBean, actionUser, params, streamProvider ); + + if ( requiresUpdate ) { + log.warn( "Output path for job: " + context.getJobDetail().getName() + " has changed. 
Job requires update" ); + try { + final IJobTrigger trigger = scheduler.getJob( context.getJobDetail().getName() ).getJobTrigger(); + final Class iaction = (Class) actionBean.getClass(); + + // remove job with outdated/invalid output path + scheduler.removeJob( context.getJobDetail().getName() ); + + // recreate the job in the context of the original creator + SecurityHelper.getInstance().runAsUser( actionUser, new Callable() { + @Override + public Void call() throws Exception { + streamProvider.setStreamingAction( null ); // remove generated content + QuartzJobKey jobKey = QuartzJobKey.parse( context.getJobDetail().getName() ); + String jobName = jobKey.getJobName(); + org.pentaho.platform.api.scheduler2.Job j = + scheduler.createJob( jobName, iaction, jobParams, trigger, streamProvider ); + log.warn( "New Job: " + j.getJobId() + " created" ); + return null; + } + } ); + } catch ( Exception e ) { + log.error( e.getMessage(), e ); + } + } + + if ( log.isDebugEnabled() ) { + log.debug( MessageFormat.format( + "Scheduling system successfully invoked action {0} as user {1} with params [ {2} ]", actionBean //$NON-NLS-1$ + .getClass().getName(), actionUser, QuartzScheduler.prettyPrintMap( params ) ) ); + } + + } + + private void sendEmail( Map actionParams, Map params, String filePath ) { + try { + IUnifiedRepository repo = PentahoSystem.get( IUnifiedRepository.class ); + RepositoryFile sourceFile = repo.getFile( filePath ); + // add metadata + Map metadata = repo.getFileMetadata( sourceFile.getId() ); + String lineageId = (String) params.get( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID ); + metadata.put( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID, lineageId ); + repo.setFileMetadata( sourceFile.getId(), metadata ); + // send email + SimpleRepositoryFileData data = repo.getDataForRead( sourceFile.getId(), SimpleRepositoryFileData.class ); + // if email is setup and we have tos, then do it + Emailer emailer = new Emailer(); + if ( !emailer.setup() ) { + // email not configured + return; + } + String to = (String) actionParams.get( "_SCH_EMAIL_TO" ); + String cc = (String) actionParams.get( "_SCH_EMAIL_CC" ); + String bcc = (String) actionParams.get( "_SCH_EMAIL_BCC" ); + if ( ( to == null || "".equals( to ) ) && ( cc == null || "".equals( cc ) ) + && ( bcc == null || "".equals( bcc ) ) ) { + // no destination + return; + } + emailer.setTo( to ); + emailer.setCc( cc ); + emailer.setBcc( bcc ); + emailer.setAttachment( data.getInputStream() ); + emailer.setAttachmentName( "attachment" ); + String attachmentName = (String) actionParams.get( "_SCH_EMAIL_ATTACHMENT_NAME" ); + if ( attachmentName != null && !"".equals( attachmentName ) ) { + String path = filePath; + if ( path.endsWith( ".*" ) ) { + path = path.replace( ".*", "" ); + } + String extension = MimeHelper.getExtension( data.getMimeType() ); + if ( extension == null ) { + extension = ".bin"; + } + if ( !attachmentName.endsWith( extension ) ) { + emailer.setAttachmentName( attachmentName + extension ); + } else { + emailer.setAttachmentName( attachmentName ); + } + } else if ( data != null ) { + String path = filePath; + if ( path.endsWith( ".*" ) ) { + path = path.replace( ".*", "" ); + } + String extension = MimeHelper.getExtension( data.getMimeType() ); + if ( extension == null ) { + extension = ".bin"; + } + path = path.substring( path.lastIndexOf( "/" ) + 1, path.length() ); + if ( !path.endsWith( extension ) ) { + emailer.setAttachmentName( path + extension ); + } else { + emailer.setAttachmentName( path ); + } + } + if ( data == null || 
data.getMimeType() == null || "".equals( data.getMimeType() ) ) { + emailer.setAttachmentMimeType( "binary/octet-stream" ); + } else { + emailer.setAttachmentMimeType( data.getMimeType() ); + } + String subject = (String) actionParams.get( "_SCH_EMAIL_SUBJECT" ); + if ( subject != null && !"".equals( subject ) ) { + emailer.setSubject( subject ); + } else { + emailer.setSubject( "Pentaho Scheduler: " + emailer.getAttachmentName() ); + } + String message = (String) actionParams.get( "_SCH_EMAIL_MESSAGE" ); + if ( subject != null && !"".equals( subject ) ) { + emailer.setBody( message ); + } + emailer.send(); + } catch ( Exception e ) { + log.warn( e.getMessage(), e ); + } + } + + class LoggingJobExecutionException extends JobExecutionException { + private static final long serialVersionUID = -4124907454208034326L; + + public LoggingJobExecutionException( String msg ) { + super( msg ); + log.error( msg ); + } + + public LoggingJobExecutionException( String msg, Throwable t ) { + super( msg, t ); + log.error( msg, t ); + } + + } + +} diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java new file mode 100644 index 0000000..86be52e --- /dev/null +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java @@ -0,0 +1,842 @@ +/*! + * This program is free software; you can redistribute it and/or modify it under the + * terms of the GNU Lesser General Public License, version 2.1 as published by the Free Software + * Foundation. + * + * You should have received a copy of the GNU Lesser General Public License along with this + * program; if not, you can obtain a copy at http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html + * or from the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; + * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU Lesser General Public License for more details. + * + * Copyright (c) 2002-2013 Pentaho Corporation.. All rights reserved. 
+ */ + +package org.pentaho.platform.scheduler2.quartz; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.pentaho.platform.api.action.IAction; +import org.pentaho.platform.api.engine.IPentahoSession; +import org.pentaho.platform.api.scheduler2.ComplexJobTrigger; +import org.pentaho.platform.api.scheduler2.IBackgroundExecutionStreamProvider; +import org.pentaho.platform.api.scheduler2.IJobFilter; +import org.pentaho.platform.api.scheduler2.IJobResult; +import org.pentaho.platform.api.scheduler2.IJobTrigger; +import org.pentaho.platform.api.scheduler2.IScheduleSubject; +import org.pentaho.platform.api.scheduler2.IScheduler; +import org.pentaho.platform.api.scheduler2.ISchedulerListener; +import org.pentaho.platform.api.scheduler2.Job; +import org.pentaho.platform.api.scheduler2.Job.JobState; +import org.pentaho.platform.api.scheduler2.JobTrigger; +import org.pentaho.platform.api.scheduler2.SchedulerException; +import org.pentaho.platform.api.scheduler2.SimpleJobTrigger; +import org.pentaho.platform.api.scheduler2.recur.ITimeRecurrence; +import org.pentaho.platform.engine.core.system.PentahoSessionHolder; +import org.pentaho.platform.engine.security.SecurityHelper; +import org.pentaho.platform.scheduler2.messsages.Messages; +import org.pentaho.platform.scheduler2.recur.IncrementalRecurrence; +import org.pentaho.platform.scheduler2.recur.QualifiedDayOfMonth; +import org.pentaho.platform.scheduler2.recur.QualifiedDayOfWeek; +import org.pentaho.platform.scheduler2.recur.QualifiedDayOfWeek.DayOfWeek; +import org.pentaho.platform.scheduler2.recur.QualifiedDayOfWeek.DayOfWeekQualifier; +import org.pentaho.platform.scheduler2.recur.RecurrenceList; +import org.pentaho.platform.scheduler2.recur.SequentialRecurrence; +import org.quartz.Calendar; +import org.quartz.CronTrigger; +import org.quartz.JobDataMap; +import org.quartz.JobDetail; +import org.quartz.Scheduler; +import org.quartz.SchedulerFactory; +import org.quartz.SimpleTrigger; +import org.quartz.Trigger; +import org.quartz.impl.StdSchedulerFactory; + +import java.io.Serializable; +import java.security.Principal; +import java.text.MessageFormat; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.regex.Pattern; + +/** + * A Quartz implementation of {@link IScheduler} + * + * @author aphillips + */ +public class QuartzScheduler implements IScheduler { + + public static final String RESERVEDMAPKEY_ACTIONCLASS = "ActionAdapterQuartzJob-ActionClass"; //$NON-NLS-1$ + + public static final String RESERVEDMAPKEY_ACTIONUSER = "ActionAdapterQuartzJob-ActionUser"; //$NON-NLS-1$ + + public static final String RESERVEDMAPKEY_ACTIONID = "ActionAdapterQuartzJob-ActionId"; //$NON-NLS-1$ + + public static final String RESERVEDMAPKEY_STREAMPROVIDER = "ActionAdapterQuartzJob-StreamProvider"; //$NON-NLS-1$ + + public static final String RESERVEDMAPKEY_UIPASSPARAM = "uiPassParam"; + + public static final String RESERVEDMAPKEY_LINEAGE_ID = "lineage-id"; + + public static final String RESERVEDMAPKEY_RESTART_FLAG = "ActionAdapterQuartzJob-Restart"; + + private static final Log logger = LogFactory.getLog( QuartzScheduler.class ); + + private SchedulerFactory quartzSchedulerFactory; + + private Scheduler quartzScheduler; + + private ArrayList listeners = new ArrayList(); + + private static final 
Pattern listPattern = Pattern.compile( "\\d+" ); //$NON-NLS-1$ + + private static final Pattern dayOfWeekRangePattern = Pattern.compile( ".*\\-.*" ); //$NON-NLS-1$ + + private static final Pattern sequencePattern = Pattern.compile( "\\d+\\-\\d+" ); //$NON-NLS-1$ + + private static final Pattern intervalPattern = Pattern.compile( "\\d+/\\d+" ); //$NON-NLS-1$ + + private static final Pattern qualifiedDayPattern = Pattern.compile( "\\d+#\\d+" ); //$NON-NLS-1$ + + private static final Pattern lastDayPattern = Pattern.compile( "\\d+L" ); //$NON-NLS-1$ + + public QuartzScheduler( SchedulerFactory schedulerFactory ) { + this.quartzSchedulerFactory = schedulerFactory; + } + + public QuartzScheduler() { + this.quartzSchedulerFactory = new StdSchedulerFactory(); + } + + /** + * Overrides the default Quartz {@link SchedulerFactory}. Note: depending on the type of scheduler you are setting + * here, there may be initializing required prior to this setter being called. Only the + * {@link SchedulerFactory#getScheduler()} will be called later, so the factory set here must already be in a state + * where that invocation will be successful. + * + * @param quartzSchedulerFactory + * the quartz factory to use for generating scheduler instances + */ + public void setQuartzSchedulerFactory( SchedulerFactory quartzSchedulerFactory ) throws SchedulerException { + this.quartzSchedulerFactory = quartzSchedulerFactory; + if( quartzScheduler != null ){ + this.shutdown(); + quartzScheduler = null; + } + } + + public Scheduler getQuartzScheduler() throws org.quartz.SchedulerException { + if ( quartzScheduler == null ) { + /* + * Currently, quartz will always give you the same scheduler object when any factory instance is asked for a + * scheduler. In other words there is no such thing as scheduler-level isolation. If we really need multiple + * isolated scheduler instances, we should investigate named schedulers, but this API getScheduler() will not help + * us in that regard. 
+ */ + quartzScheduler = quartzSchedulerFactory.getScheduler(); + } + + logger.debug( "Using quartz scheduler " + quartzScheduler ); //$NON-NLS-1$ + return quartzScheduler; + } + + private void setQuartzScheduler( Scheduler quartzScheduler ) { + this.quartzScheduler = quartzScheduler; + } + + /** {@inheritDoc} */ + public Job createJob( String jobName, String actionId, Map jobParams, IJobTrigger trigger ) + throws SchedulerException { + return createJob( jobName, actionId, jobParams, trigger, null ); + } + + /** {@inheritDoc} */ + public Job createJob( String jobName, Class action, Map jobParams, + IJobTrigger trigger ) throws SchedulerException { + return createJob( jobName, action, jobParams, trigger, null ); + } + + /** {@inheritDoc} */ + public Job createJob( String jobName, Class action, Map jobParams, + IJobTrigger trigger, IBackgroundExecutionStreamProvider outputStreamProvider ) throws SchedulerException { + + if ( action == null ) { + throw new SchedulerException( Messages.getInstance().getString( "QuartzScheduler.ERROR_0003_ACTION_IS_NULL" ) ); //$NON-NLS-1$ + } + + if ( jobParams == null ) { + jobParams = new HashMap(); + } + + jobParams.put( RESERVEDMAPKEY_ACTIONCLASS, action.getName() ); + Job ret = createJob( jobName, jobParams, trigger, outputStreamProvider ); + ret.setSchedulableClass( action.getName() ); + return ret; + } + + /** {@inheritDoc} */ + public Job createJob( String jobName, String actionId, Map jobParams, IJobTrigger trigger, + IBackgroundExecutionStreamProvider outputStreamProvider ) throws SchedulerException { + if ( StringUtils.isEmpty( actionId ) ) { + throw new SchedulerException( Messages.getInstance().getString( "QuartzScheduler.ERROR_0003_ACTION_IS_NULL" ) ); //$NON-NLS-1$ + } + + if ( jobParams == null ) { + jobParams = new HashMap(); + } + + jobParams.put( RESERVEDMAPKEY_ACTIONID, actionId ); + Job ret = createJob( jobName, jobParams, trigger, outputStreamProvider ); + ret.setSchedulableClass( "" ); //$NON-NLS-1$ + return ret; + } + + public static Trigger createQuartzTrigger( IJobTrigger jobTrigger, QuartzJobKey jobId ) throws SchedulerException { + Trigger quartzTrigger = null; + if ( jobTrigger instanceof ComplexJobTrigger ) { + try { + quartzTrigger = + new CronTrigger( jobId.toString(), jobId.getUserName(), jobTrigger.getCronString() != null ? jobTrigger + .getCronString() : QuartzCronStringFactory.createCronString( (ComplexJobTrigger) jobTrigger ) ); + } catch ( ParseException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobId.getJobName() ), e ); //$NON-NLS-1$ + } + } else if ( jobTrigger instanceof SimpleJobTrigger ) { + SimpleJobTrigger simpleTrigger = (SimpleJobTrigger) jobTrigger; + long interval = simpleTrigger.getRepeatInterval(); + if ( interval > 0 ) { + interval *= 1000; + } + int repeatCount = + simpleTrigger.getRepeatCount() < 0 ? 
SimpleTrigger.REPEAT_INDEFINITELY : simpleTrigger.getRepeatCount(); + quartzTrigger = + new SimpleTrigger( jobId.toString(), jobId.getUserName(), simpleTrigger.getStartTime(), simpleTrigger + .getEndTime(), repeatCount, interval ); + } else { + throw new SchedulerException( Messages.getInstance().getString( "QuartzScheduler.ERROR_0002_TRIGGER_WRONG_TYPE" ) ); //$NON-NLS-1$ + } + if ( quartzTrigger instanceof SimpleTrigger ) { + quartzTrigger.setMisfireInstruction( SimpleTrigger.MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT ); + } else { + quartzTrigger.setMisfireInstruction( SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW ); + } + return quartzTrigger; + } + + private JobDetail createJobDetails( QuartzJobKey jobId, Map jobParams ) { + JobDetail jobDetail = new JobDetail( jobId.toString(), jobId.getUserName(), BlockingQuartzJob.class ); + jobParams.put( RESERVEDMAPKEY_ACTIONUSER, jobId.getUserName() ); + JobDataMap jobDataMap = new JobDataMap( jobParams ); + jobDetail.setJobDataMap( jobDataMap ); + return jobDetail; + } + + private Calendar createQuartzCalendar( ComplexJobTrigger complexJobTrigger ) { + Calendar triggerCalendar = null; + if ( ( complexJobTrigger.getStartTime() != null ) || ( complexJobTrigger.getEndTime() != null ) ) { + triggerCalendar = + new QuartzSchedulerAvailability( complexJobTrigger.getStartTime(), complexJobTrigger.getEndTime() ); + } + return triggerCalendar; + } + + /** {@inheritDoc} */ + protected Job createJob( String jobName, Map jobParams, IJobTrigger trigger, + IBackgroundExecutionStreamProvider outputStreamProvider ) throws SchedulerException { + + String curUser = getCurrentUser(); + + // determine if the job params tell us who owns the job + Serializable jobOwner = jobParams.get( RESERVEDMAPKEY_ACTIONUSER ); + if ( jobOwner != null && jobOwner.toString().length() > 0 ) { + curUser = jobOwner.toString(); + } + + QuartzJobKey jobId = new QuartzJobKey( jobName, curUser ); + + Trigger quartzTrigger = createQuartzTrigger( trigger, jobId ); + + if( trigger.getEndTime() != null ){ + quartzTrigger.setEndTime( trigger.getEndTime() ); + } + + Calendar triggerCalendar = + quartzTrigger instanceof CronTrigger ? 
createQuartzCalendar( (ComplexJobTrigger) trigger ) : null; + + if ( outputStreamProvider != null ) { + jobParams.put( RESERVEDMAPKEY_STREAMPROVIDER, outputStreamProvider ); + } + + if ( trigger.getUiPassParam() != null ) { + jobParams.put( RESERVEDMAPKEY_UIPASSPARAM, trigger.getUiPassParam() ); + } + + if ( !jobParams.containsKey( RESERVEDMAPKEY_LINEAGE_ID ) ) { + String uuid = UUID.randomUUID().toString(); + jobParams.put( RESERVEDMAPKEY_LINEAGE_ID, uuid ); + } + + JobDetail jobDetail = createJobDetails( jobId, jobParams ); + + try { + Scheduler scheduler = getQuartzScheduler(); + if ( triggerCalendar != null ) { + scheduler.addCalendar( jobId.toString(), triggerCalendar, false, false ); + quartzTrigger.setCalendarName( jobId.toString() ); + } + logger + .debug( MessageFormat + .format( + "Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap( jobParams ) ) ); //$NON-NLS-1$ + scheduler.scheduleJob( jobDetail, quartzTrigger ); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobName ), e ); //$NON-NLS-1$ + } + + Job job = new Job(); + job.setJobParams( jobParams ); + job.setJobTrigger( (JobTrigger) trigger ); + job.setNextRun( quartzTrigger.getNextFireTime() ); + job.setLastRun( quartzTrigger.getPreviousFireTime() ); + job.setJobId( jobId.toString() ); + job.setJobName( jobName ); + job.setUserName( curUser ); + job.setState( JobState.NORMAL ); + + return job; + } + + @Override + public void updateJob( String jobId, Map jobParams, IJobTrigger trigger ) + throws SchedulerException { + QuartzJobKey jobKey = QuartzJobKey.parse( jobId ); + + Trigger quartzTrigger = createQuartzTrigger( trigger, jobKey ); + quartzTrigger.setJobName( jobId ); + quartzTrigger.setJobGroup( jobKey.getUserName() ); + + Calendar triggerCalendar = + quartzTrigger instanceof CronTrigger ? 
createQuartzCalendar( (ComplexJobTrigger) trigger ) : null; + + try { + Scheduler scheduler = getQuartzScheduler(); + // int triggerState = scheduler.getTriggerState(jobId, jobKey.getUserName()); + // if (triggerState != Trigger.STATE_PAUSED) { + // scheduler.pauseTrigger(jobId, jobKey.getUserName()); + // } + JobDetail origJobDetail = scheduler.getJobDetail( jobId, jobKey.getUserName() ); + if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_ACTIONCLASS ) ) { + jobParams.put( RESERVEDMAPKEY_ACTIONCLASS, origJobDetail.getJobDataMap().get( RESERVEDMAPKEY_ACTIONCLASS ) + .toString() ); + } else if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_ACTIONID ) ) { + jobParams + .put( RESERVEDMAPKEY_ACTIONID, origJobDetail.getJobDataMap().get( RESERVEDMAPKEY_ACTIONID ).toString() ); + } + + if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_STREAMPROVIDER ) ) { + jobParams.put( RESERVEDMAPKEY_STREAMPROVIDER, (Serializable) origJobDetail.getJobDataMap().get( + RESERVEDMAPKEY_STREAMPROVIDER ) ); + } + if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_UIPASSPARAM ) ) { + jobParams.put( RESERVEDMAPKEY_UIPASSPARAM, (Serializable) origJobDetail.getJobDataMap().get( + RESERVEDMAPKEY_UIPASSPARAM ) ); + } + + JobDetail jobDetail = createJobDetails( jobKey, jobParams ); + scheduler.addJob( jobDetail, true ); + if ( triggerCalendar != null ) { + scheduler.addCalendar( jobId.toString(), triggerCalendar, true, true ); + quartzTrigger.setCalendarName( jobId.toString() ); + } + scheduler.rescheduleJob( jobId, jobKey.getUserName(), quartzTrigger ); + // if (triggerState != Trigger.STATE_PAUSED) { + // scheduler.resumeTrigger(jobId, jobKey.getUserName()); + // } + logger + .debug( MessageFormat + .format( + "Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap( jobParams ) ) ); //$NON-NLS-1$ + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobKey.getJobName() ), e ); //$NON-NLS-1$ + } + } + + /** {@inheritDoc} */ + public Map getAvailabilityWindows() { + // TODO Auto-generated method stub + return null; + } + + /** {@inheritDoc} */ + public List getJobHistory( String jobId ) { + // TODO Auto-generated method stub + return null; + } + + /** {@inheritDoc} */ + public void triggerNow( String jobId ) throws SchedulerException { + try { + QuartzJobKey jobKey = QuartzJobKey.parse( jobId ); + Scheduler scheduler = getQuartzScheduler(); + String groupName = jobKey.getUserName(); + for ( Trigger trigger : scheduler.getTriggersOfJob( jobId, groupName ) ) { + if ( "MANUAL_TRIGGER".equals( trigger.getGroup() ) ) { + continue; + } + if ( trigger instanceof SimpleTrigger ) { + ( (SimpleTrigger) trigger ).setPreviousFireTime( new Date() ); + } else if ( trigger instanceof CronTrigger ) { + ( (CronTrigger) trigger ).setPreviousFireTime( new Date() ); + } + // force the trigger to be updated with the previous fire time + scheduler.rescheduleJob( jobId, jobKey.getUserName(), trigger ); + } + + scheduler.triggerJob( jobId, jobKey.getUserName() ); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId ), e ); //$NON-NLS-1$ + } + } + + /** {@inheritDoc} */ + @SuppressWarnings( "unchecked" ) + public Job getJob( String jobId ) throws SchedulerException { + try { + Scheduler scheduler = 
getQuartzScheduler(); + QuartzJobKey jobKey = QuartzJobKey.parse( jobId ); + String groupName = jobKey.getUserName(); + for ( Trigger trigger : scheduler.getTriggersOfJob( jobId, groupName ) ) { + Job job = new Job(); + JobDetail jobDetail = scheduler.getJobDetail( jobId, groupName ); + if ( jobDetail != null ) { + JobDataMap jobDataMap = jobDetail.getJobDataMap(); + if ( jobDataMap != null ) { + Map wrappedMap = jobDataMap.getWrappedMap(); + job.setJobParams( wrappedMap ); + } + } + + job.setJobId( jobId ); + setJobTrigger( scheduler, job, trigger ); + job.setUserName( jobDetail.getGroup() ); + return job; + } + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId ), e ); //$NON-NLS-1$ + } + return null; + } + + /** {@inheritDoc} */ + @SuppressWarnings( "unchecked" ) + public List getJobs( IJobFilter filter ) throws SchedulerException { + ArrayList jobs = new ArrayList(); + try { + Scheduler scheduler = getQuartzScheduler(); + for ( String groupName : scheduler.getJobGroupNames() ) { + for ( String jobId : scheduler.getJobNames( groupName ) ) { + for ( Trigger trigger : scheduler.getTriggersOfJob( jobId, groupName ) ) { + if ( "MANUAL_TRIGGER".equals( trigger.getGroup() ) ) { + continue; + } + Job job = new Job(); + job.setGroupName( groupName ); + JobDetail jobDetail = scheduler.getJobDetail( jobId, groupName ); + if ( jobDetail != null ) { + job.setUserName( jobDetail.getGroup() ); + JobDataMap jobDataMap = jobDetail.getJobDataMap(); + if ( jobDataMap != null ) { + Map wrappedMap = jobDataMap.getWrappedMap(); + job.setJobParams( wrappedMap ); + } + } + + job.setJobId( jobId ); + setJobTrigger( scheduler, job, trigger ); + job.setJobName( QuartzJobKey.parse( jobId ).getJobName() ); + job.setNextRun( trigger.getNextFireTime() ); + job.setLastRun( trigger.getPreviousFireTime() ); + if ( ( filter == null ) || filter.accept( job ) ) { + jobs.add( job ); + } + } + } + } + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( + Messages.getInstance().getString( "QuartzScheduler.ERROR_0004_FAILED_TO_LIST_JOBS" ), e ); //$NON-NLS-1$ + } + return jobs; + } + + private void setJobTrigger( Scheduler scheduler, Job job, Trigger trigger ) throws SchedulerException, + org.quartz.SchedulerException { + QuartzJobKey jobKey = QuartzJobKey.parse( job.getJobId() ); + String groupName = jobKey.getUserName(); + + if ( trigger instanceof SimpleTrigger ) { + SimpleTrigger simpleTrigger = (SimpleTrigger) trigger; + SimpleJobTrigger simpleJobTrigger = new SimpleJobTrigger(); + simpleJobTrigger.setStartTime( simpleTrigger.getStartTime() ); + simpleJobTrigger.setEndTime( simpleTrigger.getEndTime() ); + simpleJobTrigger.setUiPassParam( (String) job.getJobParams().get( RESERVEDMAPKEY_UIPASSPARAM ) ); + long interval = simpleTrigger.getRepeatInterval(); + if ( interval > 0 ) { + interval /= 1000; + } + simpleJobTrigger.setRepeatInterval( interval ); + simpleJobTrigger.setRepeatCount( simpleTrigger.getRepeatCount() ); + job.setJobTrigger( simpleJobTrigger ); + } else if ( trigger instanceof CronTrigger ) { + CronTrigger cronTrigger = (CronTrigger) trigger; + ComplexJobTrigger complexJobTrigger = createComplexTrigger( cronTrigger.getCronExpression() ); + complexJobTrigger.setUiPassParam( (String) job.getJobParams().get( RESERVEDMAPKEY_UIPASSPARAM ) ); + complexJobTrigger.setCronString( ( (CronTrigger) trigger ).getCronExpression() ); + job.setJobTrigger( complexJobTrigger ); + if ( 
trigger.getCalendarName() != null ) { + Calendar calendar = scheduler.getCalendar( trigger.getCalendarName() ); + if ( calendar instanceof QuartzSchedulerAvailability ) { + QuartzSchedulerAvailability quartzSchedulerAvailability = (QuartzSchedulerAvailability) calendar; + complexJobTrigger.setStartTime( quartzSchedulerAvailability.getStartTime() ); + complexJobTrigger.setEndTime( quartzSchedulerAvailability.getEndTime() ); + } + } + complexJobTrigger.setCronString( ( (CronTrigger) trigger ).getCronExpression() ); + } + + int triggerState = scheduler.getTriggerState( job.getJobId(), groupName ); + switch ( triggerState ) { + case Trigger.STATE_NORMAL: + job.setState( JobState.NORMAL ); + break; + case Trigger.STATE_BLOCKED: + job.setState( JobState.BLOCKED ); + break; + case Trigger.STATE_COMPLETE: + job.setState( JobState.COMPLETE ); + break; + case Trigger.STATE_ERROR: + job.setState( JobState.ERROR ); + break; + case Trigger.STATE_PAUSED: + job.setState( JobState.PAUSED ); + break; + default: + job.setState( JobState.UNKNOWN ); + break; + } + + job.setJobName( QuartzJobKey.parse( job.getJobId() ).getJobName() ); + job.setNextRun( trigger.getNextFireTime() ); + job.setLastRun( trigger.getPreviousFireTime() ); + + } + + /** {@inheritDoc} */ + public Integer getMinScheduleInterval( IScheduleSubject subject ) { + // TODO Auto-generated method stub + return 0; + } + + /** {@inheritDoc} */ + public ComplexJobTrigger getSubjectAvailabilityWindow( IScheduleSubject subject ) { + // TODO Auto-generated method stub + return null; + } + + /** {@inheritDoc} */ + public void pause() throws SchedulerException { + try { + getQuartzScheduler().standby(); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( e ); + } + } + + /** {@inheritDoc} */ + public void pauseJob( String jobId ) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + scheduler.pauseJob( jobId, QuartzJobKey.parse( jobId ).getUserName() ); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance() + .getString( "QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS" ), e ); //$NON-NLS-1$ + } + } + + /** {@inheritDoc} */ + public void removeJob( String jobId ) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + scheduler.deleteJob( jobId, QuartzJobKey.parse( jobId ).getUserName() ); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance() + .getString( "QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS" ), e ); //$NON-NLS-1$ + } + } + + /** {@inheritDoc} */ + public void start() throws SchedulerException { + try { + getQuartzScheduler().start(); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( e ); + } + } + + /** {@inheritDoc} */ + public void resumeJob( String jobId ) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + scheduler.resumeJob( jobId, QuartzJobKey.parse( jobId ).getUserName() ); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0005_FAILED_TO_RESUME_JOBS" ), e ); //$NON-NLS-1$ + } + } + + /** {@inheritDoc} */ + public void setAvailabilityWindows( Map availability ) { + // TODO Auto-generated method stub + + } + + /** {@inheritDoc} */ + public void setMinScheduleInterval( IScheduleSubject subject, int intervalInSeconds ) { + // TODO Auto-generated method stub + + } + + /** {@inheritDoc} */ + public void 
setSubjectAvailabilityWindow( IScheduleSubject subject, ComplexJobTrigger availability ) { + // TODO Auto-generated method stub + + } + + /** + * @return + */ + protected String getCurrentUser() { + IPentahoSession session = PentahoSessionHolder.getSession(); + if ( session == null ) { + return null; + } + Principal p = SecurityHelper.getInstance().getAuthentication(); + return ( p == null ) ? null : p.getName(); + } + + public static ComplexJobTrigger createComplexTrigger( String cronExpression ) { + ComplexJobTrigger complexJobTrigger = new ComplexJobTrigger(); + complexJobTrigger.setHourlyRecurrence( (ITimeRecurrence) null ); + complexJobTrigger.setMinuteRecurrence( (ITimeRecurrence) null ); + complexJobTrigger.setSecondRecurrence( (ITimeRecurrence) null ); + + for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 6 ) ) { + complexJobTrigger.addYearlyRecurrence( recurrence ); + } + for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 4 ) ) { + complexJobTrigger.addMonthlyRecurrence( recurrence ); + } + List dayOfWeekRecurrences = parseDayOfWeekRecurrences( cronExpression ); + List dayOfMonthRecurrences = parseRecurrence( cronExpression, 3 ); + if ( ( dayOfWeekRecurrences.size() > 0 ) && ( dayOfMonthRecurrences.size() == 0 ) ) { + for ( ITimeRecurrence recurrence : dayOfWeekRecurrences ) { + complexJobTrigger.addDayOfWeekRecurrence( recurrence ); + } + } else if ( ( dayOfWeekRecurrences.size() == 0 ) && ( dayOfMonthRecurrences.size() > 0 ) ) { + for ( ITimeRecurrence recurrence : dayOfMonthRecurrences ) { + complexJobTrigger.addDayOfMonthRecurrence( recurrence ); + } + } + for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 2 ) ) { + complexJobTrigger.addHourlyRecurrence( recurrence ); + } + for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 1 ) ) { + complexJobTrigger.addMinuteRecurrence( recurrence ); + } + for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 0 ) ) { + complexJobTrigger.addSecondRecurrence( recurrence ); + } + return complexJobTrigger; + } + + private static List parseDayOfWeekRecurrences( String cronExpression ) { + List dayOfWeekRecurrence = new ArrayList(); + String delims = "[ ]+"; //$NON-NLS-1$ + String[] tokens = cronExpression.split( delims ); + if ( tokens.length >= 6 ) { + String dayOfWeekTokens = tokens[5]; + tokens = dayOfWeekTokens.split( "," ); //$NON-NLS-1$ + if ( ( tokens.length > 1 ) || !( tokens[0].equals( "*" ) || tokens[0].equals( "?" 
) ) ) { //$NON-NLS-1$ //$NON-NLS-2$ + RecurrenceList dayOfWeekList = null; + for ( String token : tokens ) { + if ( listPattern.matcher( token ).matches() ) { + if ( dayOfWeekList == null ) { + dayOfWeekList = new RecurrenceList(); + } + dayOfWeekList.getValues().add( Integer.parseInt( token ) ); + } else { + if ( dayOfWeekList != null ) { + dayOfWeekRecurrence.add( dayOfWeekList ); + dayOfWeekList = null; + } + if ( sequencePattern.matcher( token ).matches() ) { + String[] days = token.split( "-" ); //$NON-NLS-1$ + dayOfWeekRecurrence.add( new SequentialRecurrence( Integer.parseInt( days[0] ), Integer + .parseInt( days[1] ) ) ); + } else if ( intervalPattern.matcher( token ).matches() ) { + String[] days = token.split( "/" ); //$NON-NLS-1$ + dayOfWeekRecurrence.add( new IncrementalRecurrence( Integer.parseInt( days[0] ), Integer + .parseInt( days[1] ) ) ); + } else if ( qualifiedDayPattern.matcher( token ).matches() ) { + String[] days = token.split( "#" ); //$NON-NLS-1$ + dayOfWeekRecurrence + .add( new QualifiedDayOfWeek( Integer.parseInt( days[1] ), Integer.parseInt( days[0] ) ) ); + } else if ( lastDayPattern.matcher( token ).matches() ) { + DayOfWeek dayOfWeek = + DayOfWeek.values()[( Integer.parseInt( token.substring( 0, token.length() - 1 ) ) - 1 ) % 7]; + dayOfWeekRecurrence.add( new QualifiedDayOfWeek( DayOfWeekQualifier.LAST, dayOfWeek ) ); + } else if ( dayOfWeekRangePattern.matcher( token ).matches() ) { + String[] days = token.split( "-" ); //$NON-NLS-1$ + int start = DayOfWeek.valueOf( days[0] ).ordinal(); + int finish = DayOfWeek.valueOf( days[1] ).ordinal(); + dayOfWeekRecurrence.add( new SequentialRecurrence( start, finish ) ); + } else { + dayOfWeekList = new RecurrenceList(); + dayOfWeekList.getValues().add( DayOfWeek.valueOf( token ).ordinal() ); + dayOfWeekRecurrence.add( dayOfWeekList ); + dayOfWeekList = null; + // } else { + // throw new IllegalArgumentException(Messages.getInstance().getErrorString( + // "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$ + } + } + + } + if ( dayOfWeekList != null ) { + dayOfWeekRecurrence.add( dayOfWeekList ); + } + } + } else { + throw new IllegalArgumentException( Messages.getInstance().getErrorString( + "ComplexJobTrigger.ERROR_0001_InvalidCronExpression" ) ); //$NON-NLS-1$ + } + return dayOfWeekRecurrence; + } + + private static List parseRecurrence( String cronExpression, int tokenIndex ) { + List timeRecurrence = new ArrayList(); + String delims = "[ ]+"; //$NON-NLS-1$ + String[] tokens = cronExpression.split( delims ); + if ( tokens.length > tokenIndex ) { + String timeTokens = tokens[tokenIndex]; + tokens = timeTokens.split( "," ); //$NON-NLS-1$ + if ( ( tokens.length > 1 ) || !( tokens[0].equals( "*" ) || tokens[0].equals( "?" 
) ) ) { //$NON-NLS-1$ //$NON-NLS-2$ + RecurrenceList timeList = null; + for ( String token : tokens ) { + if ( listPattern.matcher( token ).matches() ) { + if ( timeList == null ) { + timeList = new RecurrenceList(); + } + timeList.getValues().add( Integer.parseInt( token ) ); + } else { + if ( timeList != null ) { + timeRecurrence.add( timeList ); + timeList = null; + } + if ( sequencePattern.matcher( token ).matches() ) { + String[] days = token.split( "-" ); //$NON-NLS-1$ + timeRecurrence.add( new SequentialRecurrence( Integer.parseInt( days[0] ), + Integer.parseInt( days[ 1 ] ) ) ); + } else if ( intervalPattern.matcher( token ).matches() ) { + String[] days = token.split( "/" ); //$NON-NLS-1$ + timeRecurrence + .add( new IncrementalRecurrence( Integer.parseInt( days[ 0 ] ), Integer.parseInt( days[ 1 ] ) ) ); + } else if ( "L".equalsIgnoreCase( token ) ) { + timeRecurrence.add( new QualifiedDayOfMonth() ); + } else { + throw new IllegalArgumentException( Messages.getInstance().getErrorString( + "ComplexJobTrigger.ERROR_0001_InvalidCronExpression" ) ); //$NON-NLS-1$ + } + } + + } + if ( timeList != null ) { + timeRecurrence.add( timeList ); + } + } + } else { + throw new IllegalArgumentException( Messages.getInstance().getErrorString( + "ComplexJobTrigger.ERROR_0001_InvalidCronExpression" ) ); //$NON-NLS-1$ + } + return timeRecurrence; + } + + /** {@inheritDoc} */ + public SchedulerStatus getStatus() throws SchedulerException { + SchedulerStatus schedulerStatus = SchedulerStatus.STOPPED; + try { + if ( getQuartzScheduler().isInStandbyMode() ) { + schedulerStatus = SchedulerStatus.PAUSED; + } else if ( getQuartzScheduler().isStarted() ) { + schedulerStatus = SchedulerStatus.RUNNING; + } + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( Messages.getInstance().getString( + "QuartzScheduler.ERROR_0006_FAILED_TO_GET_SCHEDULER_STATUS" ), e ); //$NON-NLS-1$ + } + return schedulerStatus; + } + + /** {@inheritDoc} */ + public void shutdown() throws SchedulerException { + try { + boolean waitForJobsToComplete = true; + getQuartzScheduler().shutdown( waitForJobsToComplete ); + setQuartzScheduler(null); + } catch ( org.quartz.SchedulerException e ) { + throw new SchedulerException( e ); + } + } + + public static String prettyPrintMap( Map map ) { + StringBuilder b = new StringBuilder(); + for ( Map.Entry entry : map.entrySet() ) { + b.append( entry.getKey() ); + b.append( "=" ); //$NON-NLS-1$ + b.append( entry.getValue() ); + b.append( "; " ); //$NON-NLS-1$ + } + return b.toString(); + } + + public void addListener( ISchedulerListener listener ) { + listeners.add( listener ); + } + + public void setListeners( Collection listeners ) { + this.listeners.addAll( listeners ); + } + + public void fireJobCompleted( IAction actionBean, String actionUser, Map params, + IBackgroundExecutionStreamProvider streamProvider ) { + for ( ISchedulerListener listener : listeners ) { + listener.jobCompleted( actionBean, actionUser, params, streamProvider ); + } + } +} diff --git a/pom.xml b/pom.xml new file mode 100644 index 0000000..392d313 --- /dev/null +++ b/pom.xml @@ -0,0 +1,74 @@ + + 4.0.0 + + com.github.zhicwu + pdi-cluster + 6.1.0.1-SNAPSHOT + pom + PDI Cluster + Instructions and workarounds for building a cluster using Pentaho BA server and Kettle. 
+ https://github.com/zhicwu/pdi-cluster + 2016 + + + pentaho-kettle + pentaho-platform + + + + 6.1.0.1-196 + UTF-8 + ${project.basedir} + 3.5.1 + 1.8 + + + + + + src/main/resources + true + + + + + org.apache.maven.plugins + maven-compiler-plugin + ${compiler-plugin.version} + + ${java.version} + ${java.version} + + + + + + + + pentaho-repo + http://repo.pentaho.org/artifactory/repo/ + + + + + + Apache 2 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + Apache License Version 2.0 + + + + + scm:git:git@github.com:zhicwu/pdi-cluster.git + scm:git:git@github.com:zhicwu/pdi-cluster.git + https://github.com/zhicwu/pdi-cluster + ${project.version} + + + + + Zhichun Wu + + + \ No newline at end of file From 815835abe5fd56d7d036b77fd6290e2c73123f14 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 17:40:25 +0800 Subject: [PATCH 02/15] Reformat code before applying changes --- pentaho-kettle/pom.xml | 3 +- .../di/core/database/util/DatabaseUtil.java | 308 +- .../di/core/logging/LogChannelFileWriter.java | 334 +- .../di/core/logging/LoggingRegistry.java | 399 +- .../src/main/java/org/pentaho/di/job/Job.java | 3968 +++--- .../main/java/org/pentaho/di/job/JobMeta.java | 4976 ++++--- .../di/job/entries/job/JobEntryJob.java | 2678 ++-- .../di/job/entries/trans/JobEntryTrans.java | 2685 ++-- .../pur/LazyUnifiedRepositoryDirectory.java | 514 +- .../org/pentaho/di/resource/ResourceUtil.java | 275 +- .../org/pentaho/di/trans/steps/Trans.java | 10426 ++++++++------- .../org/pentaho/di/trans/steps/TransMeta.java | 11038 ++++++++-------- .../di/trans/steps/append/AppendMeta.java | 320 +- .../steps/filterrows/FilterRowsMeta.java | 629 +- .../steps/javafilter/JavaFilterMeta.java | 360 +- .../trans/steps/mergejoin/MergeJoinMeta.java | 484 +- .../trans/steps/mergerows/MergeRowsMeta.java | 513 +- .../steps/tableinput/TableInputMeta.java | 983 +- .../main/java/org/pentaho/di/www/Carte.java | 619 +- .../org/pentaho/di/www/CarteSingleton.java | 452 +- .../java/org/pentaho/di/www/WebServer.java | 854 +- .../src/main/resources/kettle-servlets.xml | 167 +- pentaho-platform/pom.xml | 3 +- .../quartz/ActionAdapterQuartzJob.java | 704 +- .../scheduler2/quartz/QuartzScheduler.java | 1520 +-- pom.xml | 3 +- 26 files changed, 22363 insertions(+), 22852 deletions(-) diff --git a/pentaho-kettle/pom.xml b/pentaho-kettle/pom.xml index 28b1de3..2c26837 100644 --- a/pentaho-kettle/pom.xml +++ b/pentaho-kettle/pom.xml @@ -1,4 +1,5 @@ - + 4.0.0 com.github.zhicwu diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java index b2bbed3..29626ee 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java @@ -22,16 +22,6 @@ package org.pentaho.di.core.database.util; -import java.sql.Connection; -import java.sql.Statement; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import javax.naming.InitialContext; -import javax.naming.NamingException; -import javax.sql.DataSource; - import org.pentaho.di.core.Const; import org.pentaho.di.core.database.DataSourceNamingException; import org.pentaho.di.core.database.DataSourceProviderInterface; @@ -39,6 +29,14 @@ import org.pentaho.di.i18n.BaseMessages; import javax.naming.Context; +import javax.naming.InitialContext; +import javax.naming.NamingException; +import javax.sql.DataSource; +import 
java.sql.Connection; +import java.sql.Statement; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; /** * Provides default implementation for looking data sources up in JNDI. @@ -47,150 +45,150 @@ */ public class DatabaseUtil implements DataSourceProviderInterface { - private static Class PKG = Database.class; // for i18n purposes, needed by Translator2!! - private static Map FoundDS = Collections.synchronizedMap( new HashMap() ); - - /** - * Clears cache of DataSources (For Unit test) - */ - protected static void clearDSCache() { - FoundDS.clear(); - } - - /** - * Since JNDI is supported different ways in different app servers, it's nearly impossible to have a ubiquitous way to - * look up a datasource. This method is intended to hide all the lookups that may be required to find a jndi name. - * - * @param dsName The Datasource name - * @return DataSource if there is one bound in JNDI - * @throws NamingException - */ - protected static DataSource getDataSourceFromJndi( String dsName, Context ctx ) throws NamingException { - if ( Const.isEmpty( dsName ) ) { - throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", String.valueOf( dsName ) ) ); - } - Object foundDs = FoundDS.get( dsName ); - if ( foundDs != null ) { - return (DataSource) foundDs; - } - Object lkup = null; - DataSource rtn = null; - NamingException firstNe = null; - // First, try what they ask for... - try { - lkup = ctx.lookup( dsName ); - if ( lkup instanceof DataSource ) { - rtn = (DataSource) lkup; - FoundDS.put( dsName, rtn ); - return rtn; - } - } catch ( NamingException ignored ) { - firstNe = ignored; - } - try { - // Needed this for Jboss - lkup = ctx.lookup( "java:" + dsName ); - if ( lkup instanceof DataSource ) { - rtn = (DataSource) lkup; - FoundDS.put( dsName, rtn ); - return rtn; - } - } catch ( NamingException ignored ) { - // ignore - } - try { - // Tomcat - lkup = ctx.lookup( "java:comp/env/jdbc/" + dsName ); - if ( lkup instanceof DataSource ) { - rtn = (DataSource) lkup; - FoundDS.put( dsName, rtn ); - return rtn; - } - } catch ( NamingException ignored ) { - // ignore - } - try { - // Others? - lkup = ctx.lookup( "jdbc/" + dsName ); - if ( lkup instanceof DataSource ) { - rtn = (DataSource) lkup; - FoundDS.put( dsName, rtn ); - return rtn; - } - } catch ( NamingException ignored ) { - // ignore - } - if ( firstNe != null ) { - throw firstNe; - } - throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", dsName ) ); - } - - public static void closeSilently( Connection[] connections ) { - if ( connections == null || connections.length == 0 ) { - return; - } - for ( Connection conn : connections ) { - closeSilently( conn ); - } - } - - public static void closeSilently( Connection conn ) { - if ( conn == null ) { - return; - } - try { - conn.close(); - } catch ( Throwable e ) { - // omit - } - } - - public static void closeSilently( Statement[] statements ) { - if ( statements == null || statements.length == 0 ) { - return; - } - for ( Statement st : statements ) { - closeSilently( st ); - } - } - - public static void closeSilently( Statement st ) { - if ( st == null ) { - return; - } - try { - st.close(); - } catch ( Throwable e ) { - // omit - } - } - - - /** - * Implementation of DatasourceProviderInterface. 
- */ - @Override - public DataSource getNamedDataSource( String datasourceName ) throws DataSourceNamingException { - try { - return DatabaseUtil.getDataSourceFromJndi( datasourceName, new InitialContext() ); - } catch ( NamingException ex ) { - throw new DataSourceNamingException( ex ); - } - } - - @Override - public DataSource getNamedDataSource( String datasourceName, DatasourceType type ) - throws DataSourceNamingException { - if ( type != null ) { - switch( type ) { - case JNDI: - return getNamedDataSource( datasourceName ); - case POOLED: - throw new UnsupportedOperationException( - getClass().getName() + " does not support providing pooled data sources" ); - } + private static Class PKG = Database.class; // for i18n purposes, needed by Translator2!! + private static Map FoundDS = Collections.synchronizedMap(new HashMap()); + + /** + * Clears cache of DataSources (For Unit test) + */ + protected static void clearDSCache() { + FoundDS.clear(); + } + + /** + * Since JNDI is supported different ways in different app servers, it's nearly impossible to have a ubiquitous way to + * look up a datasource. This method is intended to hide all the lookups that may be required to find a jndi name. + * + * @param dsName The Datasource name + * @return DataSource if there is one bound in JNDI + * @throws NamingException + */ + protected static DataSource getDataSourceFromJndi(String dsName, Context ctx) throws NamingException { + if (Const.isEmpty(dsName)) { + throw new NamingException(BaseMessages.getString(PKG, "DatabaseUtil.DSNotFound", String.valueOf(dsName))); + } + Object foundDs = FoundDS.get(dsName); + if (foundDs != null) { + return (DataSource) foundDs; + } + Object lkup = null; + DataSource rtn = null; + NamingException firstNe = null; + // First, try what they ask for... + try { + lkup = ctx.lookup(dsName); + if (lkup instanceof DataSource) { + rtn = (DataSource) lkup; + FoundDS.put(dsName, rtn); + return rtn; + } + } catch (NamingException ignored) { + firstNe = ignored; + } + try { + // Needed this for Jboss + lkup = ctx.lookup("java:" + dsName); + if (lkup instanceof DataSource) { + rtn = (DataSource) lkup; + FoundDS.put(dsName, rtn); + return rtn; + } + } catch (NamingException ignored) { + // ignore + } + try { + // Tomcat + lkup = ctx.lookup("java:comp/env/jdbc/" + dsName); + if (lkup instanceof DataSource) { + rtn = (DataSource) lkup; + FoundDS.put(dsName, rtn); + return rtn; + } + } catch (NamingException ignored) { + // ignore + } + try { + // Others? 
+ lkup = ctx.lookup("jdbc/" + dsName); + if (lkup instanceof DataSource) { + rtn = (DataSource) lkup; + FoundDS.put(dsName, rtn); + return rtn; + } + } catch (NamingException ignored) { + // ignore + } + if (firstNe != null) { + throw firstNe; + } + throw new NamingException(BaseMessages.getString(PKG, "DatabaseUtil.DSNotFound", dsName)); + } + + public static void closeSilently(Connection[] connections) { + if (connections == null || connections.length == 0) { + return; + } + for (Connection conn : connections) { + closeSilently(conn); + } + } + + public static void closeSilently(Connection conn) { + if (conn == null) { + return; + } + try { + conn.close(); + } catch (Throwable e) { + // omit + } + } + + public static void closeSilently(Statement[] statements) { + if (statements == null || statements.length == 0) { + return; + } + for (Statement st : statements) { + closeSilently(st); + } + } + + public static void closeSilently(Statement st) { + if (st == null) { + return; + } + try { + st.close(); + } catch (Throwable e) { + // omit + } + } + + + /** + * Implementation of DatasourceProviderInterface. + */ + @Override + public DataSource getNamedDataSource(String datasourceName) throws DataSourceNamingException { + try { + return DatabaseUtil.getDataSourceFromJndi(datasourceName, new InitialContext()); + } catch (NamingException ex) { + throw new DataSourceNamingException(ex); + } + } + + @Override + public DataSource getNamedDataSource(String datasourceName, DatasourceType type) + throws DataSourceNamingException { + if (type != null) { + switch (type) { + case JNDI: + return getNamedDataSource(datasourceName); + case POOLED: + throw new UnsupportedOperationException( + getClass().getName() + " does not support providing pooled data sources"); + } + } + throw new IllegalArgumentException("Unsupported data source type: " + type); } - throw new IllegalArgumentException( "Unsupported data source type: " + type ); - } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java index acf37c9..d227ab5 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java @@ -22,195 +22,179 @@ package org.pentaho.di.core.logging; -import java.io.IOException; -import java.io.OutputStream; -import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.commons.vfs2.FileObject; import org.pentaho.di.core.exception.KettleException; import org.pentaho.di.core.vfs.KettleVFS; +import java.io.IOException; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + /** * This class takes care of polling the central log store for new log messages belonging to a certain log channel ID * (and children). The log lines will be written to a logging file. 
* * @author matt - * */ public class LogChannelFileWriter { - private String logChannelId; - private FileObject logFile; - private boolean appending; - private int pollingInterval; - - private AtomicBoolean active; - private KettleException exception; - private int lastBufferLineNr; - protected OutputStream logFileOutputStream; - - /** - * Create a new log channel file writer - * - * @param logChannelId - * The log channel (+children) to write to the log file - * @param logFile - * The logging file to write to - * @param appending - * set to true if you want to append to an existing file - * @param pollingInterval - * The polling interval in milliseconds. - * - * @throws KettleException - * in case the specified log file can't be created. - */ - public LogChannelFileWriter( String logChannelId, FileObject logFile, boolean appending, int pollingInterval ) throws KettleException { - this.logChannelId = logChannelId; - this.logFile = logFile; - this.appending = appending; - this.pollingInterval = pollingInterval; - - active = new AtomicBoolean( false ); - lastBufferLineNr = KettleLogStore.getLastBufferLineNr(); - - try { - logFileOutputStream = KettleVFS.getOutputStream( logFile, appending ); - } catch ( IOException e ) { - throw new KettleException( "There was an error while trying to open file '" + logFile + "' for writing", e ); - } - } - - /** - * Create a new log channel file writer - * - * @param logChannelId - * The log channel (+children) to write to the log file - * @param logFile - * The logging file to write to - * @param appending - * set to true if you want to append to an existing file - * - * @throws KettleException - * in case the specified log file can't be created. - */ - public LogChannelFileWriter( String logChannelId, FileObject logFile, boolean appending ) throws KettleException { - this( logChannelId, logFile, appending, 1000 ); - } - - /** - * Start the logging thread which will write log data from the specified log channel to the log file. In case of an - * error, the exception will be available with method getException(). - */ - public void startLogging() { - - exception = null; - active.set( true ); - - Thread thread = new Thread( new Runnable() { - public void run() { + private String logChannelId; + private FileObject logFile; + private boolean appending; + private int pollingInterval; + + private AtomicBoolean active; + private KettleException exception; + private int lastBufferLineNr; + protected OutputStream logFileOutputStream; + + /** + * Create a new log channel file writer + * + * @param logChannelId The log channel (+children) to write to the log file + * @param logFile The logging file to write to + * @param appending set to true if you want to append to an existing file + * @param pollingInterval The polling interval in milliseconds. + * @throws KettleException in case the specified log file can't be created. 
+ */ + public LogChannelFileWriter(String logChannelId, FileObject logFile, boolean appending, int pollingInterval) throws KettleException { + this.logChannelId = logChannelId; + this.logFile = logFile; + this.appending = appending; + this.pollingInterval = pollingInterval; + + active = new AtomicBoolean(false); + lastBufferLineNr = KettleLogStore.getLastBufferLineNr(); + try { + logFileOutputStream = KettleVFS.getOutputStream(logFile, appending); + } catch (IOException e) { + throw new KettleException("There was an error while trying to open file '" + logFile + "' for writing", e); + } + } + + /** + * Create a new log channel file writer + * + * @param logChannelId The log channel (+children) to write to the log file + * @param logFile The logging file to write to + * @param appending set to true if you want to append to an existing file + * @throws KettleException in case the specified log file can't be created. + */ + public LogChannelFileWriter(String logChannelId, FileObject logFile, boolean appending) throws KettleException { + this(logChannelId, logFile, appending, 1000); + } - while ( active.get() && exception == null ) { - flush(); - Thread.sleep( pollingInterval ); - } - // When done, save the last bit as well... - flush(); - - } catch ( Exception e ) { - exception = new KettleException( "There was an error logging to file '" + logFile + "'", e ); - } finally { - try { - if ( logFileOutputStream != null ) { - logFileOutputStream.close(); - logFileOutputStream = null; + /** + * Start the logging thread which will write log data from the specified log channel to the log file. In case of an + * error, the exception will be available with method getException(). + */ + public void startLogging() { + + exception = null; + active.set(true); + + Thread thread = new Thread(new Runnable() { + public void run() { + try { + + while (active.get() && exception == null) { + flush(); + Thread.sleep(pollingInterval); + } + // When done, save the last bit as well... 
+ flush(); + + } catch (Exception e) { + exception = new KettleException("There was an error logging to file '" + logFile + "'", e); + } finally { + try { + if (logFileOutputStream != null) { + logFileOutputStream.close(); + logFileOutputStream = null; + } + } catch (Exception e) { + exception = new KettleException("There was an error closing log file file '" + logFile + "'", e); + } + } } - } catch ( Exception e ) { - exception = new KettleException( "There was an error closing log file file '" + logFile + "'", e ); - } + }); + thread.start(); + } + + public synchronized void flush() { + try { + int last = KettleLogStore.getLastBufferLineNr(); + StringBuffer buffer = KettleLogStore.getAppender().getBuffer(logChannelId, false, lastBufferLineNr, last); + logFileOutputStream.write(buffer.toString().getBytes()); + lastBufferLineNr = last; + logFileOutputStream.flush(); + } catch (Exception e) { + exception = new KettleException("There was an error logging to file '" + logFile + "'", e); } - } - } ); - thread.start(); - } - - public synchronized void flush() { - try { - int last = KettleLogStore.getLastBufferLineNr(); - StringBuffer buffer = KettleLogStore.getAppender().getBuffer( logChannelId, false, lastBufferLineNr, last ); - logFileOutputStream.write( buffer.toString().getBytes() ); - lastBufferLineNr = last; - logFileOutputStream.flush(); - } catch ( Exception e ) { - exception = new KettleException( "There was an error logging to file '" + logFile + "'", e ); } - } - - public void stopLogging() { - flush(); - active.set( false ); - } - - public KettleException getException() { - return exception; - } - - /** - * @return the logChannelId - */ - public String getLogChannelId() { - return logChannelId; - } - - /** - * @param logChannelId - * the logChannelId to set - */ - public void setLogChannelId( String logChannelId ) { - this.logChannelId = logChannelId; - } - - /** - * @return the logFile - */ - public FileObject getLogFile() { - return logFile; - } - - /** - * @param logFile - * the logFile to set - */ - public void setLogFile( FileObject logFile ) { - this.logFile = logFile; - } - - /** - * @return the appending - */ - public boolean isAppending() { - return appending; - } - - /** - * @param appending - * the appending to set - */ - public void setAppending( boolean appending ) { - this.appending = appending; - } - - /** - * @return the pollingInterval - */ - public int getPollingInterval() { - return pollingInterval; - } - - /** - * @param pollingInterval - * the pollingInterval to set - */ - public void setPollingInterval( int pollingInterval ) { - this.pollingInterval = pollingInterval; - } + + public void stopLogging() { + flush(); + active.set(false); + } + + public KettleException getException() { + return exception; + } + + /** + * @return the logChannelId + */ + public String getLogChannelId() { + return logChannelId; + } + + /** + * @param logChannelId the logChannelId to set + */ + public void setLogChannelId(String logChannelId) { + this.logChannelId = logChannelId; + } + + /** + * @return the logFile + */ + public FileObject getLogFile() { + return logFile; + } + + /** + * @param logFile the logFile to set + */ + public void setLogFile(FileObject logFile) { + this.logFile = logFile; + } + + /** + * @return the appending + */ + public boolean isAppending() { + return appending; + } + + /** + * @param appending the appending to set + */ + public void setAppending(boolean appending) { + this.appending = appending; + } + + /** + * @return the pollingInterval + */ + public 
int getPollingInterval() { + return pollingInterval; + } + + /** + * @param pollingInterval the pollingInterval to set + */ + public void setPollingInterval(int pollingInterval) { + this.pollingInterval = pollingInterval; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java index 00d906d..0cc99f1 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java @@ -22,229 +22,224 @@ package org.pentaho.di.core.logging; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - import org.pentaho.di.core.Const; import org.pentaho.di.core.util.EnvUtil; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; + public class LoggingRegistry { - private static LoggingRegistry registry = new LoggingRegistry(); - private Map map; - private Map> childrenMap; - private Date lastModificationTime; - private int maxSize; - private final int DEFAULT_MAX_SIZE = 10000; - - private Object syncObject = new Object(); - - private LoggingRegistry() { - this.map = new ConcurrentHashMap(); - this.childrenMap = new ConcurrentHashMap>(); - - this.lastModificationTime = new Date(); - this.maxSize = Const.toInt( EnvUtil.getSystemProperty( "KETTLE_MAX_LOGGING_REGISTRY_SIZE" ), DEFAULT_MAX_SIZE ); - } - - public static LoggingRegistry getInstance() { - return registry; - } - - public String registerLoggingSource( Object object ) { - synchronized ( this.syncObject ) { - - this.maxSize = Const.toInt( EnvUtil.getSystemProperty( "KETTLE_MAX_LOGGING_REGISTRY_SIZE" ), 10000 ); - - LoggingObject loggingSource = new LoggingObject( object ); - - LoggingObjectInterface found = findExistingLoggingSource( loggingSource ); - if ( found != null ) { - LoggingObjectInterface foundParent = found.getParent(); - LoggingObjectInterface loggingSourceParent = loggingSource.getParent(); - if ( foundParent != null && loggingSourceParent != null ) { - String foundParentLogChannelId = foundParent.getLogChannelId(); - String sourceParentLogChannelId = loggingSourceParent.getLogChannelId(); - if ( foundParentLogChannelId != null && sourceParentLogChannelId != null - && foundParentLogChannelId.equals( sourceParentLogChannelId ) ) { - String foundLogChannelId = found.getLogChannelId(); - if ( foundLogChannelId != null ) { - return foundLogChannelId; - } - } - } - } - - String logChannelId = UUID.randomUUID().toString(); - loggingSource.setLogChannelId( logChannelId ); - - this.map.put( logChannelId, loggingSource ); - - if ( loggingSource.getParent() != null ) { - String parentLogChannelId = loggingSource.getParent().getLogChannelId(); - if ( parentLogChannelId != null ) { - List parentChildren = this.childrenMap.get( parentLogChannelId ); - if ( parentChildren == null ) { - parentChildren = new ArrayList(); - this.childrenMap.put( parentLogChannelId, parentChildren ); - } - parentChildren.add( logChannelId ); - } - } - - this.lastModificationTime = new Date(); - loggingSource.setRegistrationDate( this.lastModificationTime ); - - if ( ( this.maxSize > 0 ) && ( this.map.size() > this.maxSize ) ) { - List all = new ArrayList( this.map.values() ); - Collections.sort( all, new Comparator() { - @Override - 
public int compare( LoggingObjectInterface o1, LoggingObjectInterface o2 ) { - if ( ( o1 == null ) && ( o2 != null ) ) { - return -1; - } - if ( ( o1 != null ) && ( o2 == null ) ) { - return 1; - } - if ( ( o1 == null ) && ( o2 == null ) ) { - return 0; - } - if ( o1.getRegistrationDate() == null && o2.getRegistrationDate() != null ) { - return -1; + private static LoggingRegistry registry = new LoggingRegistry(); + private Map map; + private Map> childrenMap; + private Date lastModificationTime; + private int maxSize; + private final int DEFAULT_MAX_SIZE = 10000; + + private Object syncObject = new Object(); + + private LoggingRegistry() { + this.map = new ConcurrentHashMap(); + this.childrenMap = new ConcurrentHashMap>(); + + this.lastModificationTime = new Date(); + this.maxSize = Const.toInt(EnvUtil.getSystemProperty("KETTLE_MAX_LOGGING_REGISTRY_SIZE"), DEFAULT_MAX_SIZE); + } + + public static LoggingRegistry getInstance() { + return registry; + } + + public String registerLoggingSource(Object object) { + synchronized (this.syncObject) { + + this.maxSize = Const.toInt(EnvUtil.getSystemProperty("KETTLE_MAX_LOGGING_REGISTRY_SIZE"), 10000); + + LoggingObject loggingSource = new LoggingObject(object); + + LoggingObjectInterface found = findExistingLoggingSource(loggingSource); + if (found != null) { + LoggingObjectInterface foundParent = found.getParent(); + LoggingObjectInterface loggingSourceParent = loggingSource.getParent(); + if (foundParent != null && loggingSourceParent != null) { + String foundParentLogChannelId = foundParent.getLogChannelId(); + String sourceParentLogChannelId = loggingSourceParent.getLogChannelId(); + if (foundParentLogChannelId != null && sourceParentLogChannelId != null + && foundParentLogChannelId.equals(sourceParentLogChannelId)) { + String foundLogChannelId = found.getLogChannelId(); + if (foundLogChannelId != null) { + return foundLogChannelId; + } + } + } } - if ( o1.getRegistrationDate() != null && o2.getRegistrationDate() == null ) { - return 1; + + String logChannelId = UUID.randomUUID().toString(); + loggingSource.setLogChannelId(logChannelId); + + this.map.put(logChannelId, loggingSource); + + if (loggingSource.getParent() != null) { + String parentLogChannelId = loggingSource.getParent().getLogChannelId(); + if (parentLogChannelId != null) { + List parentChildren = this.childrenMap.get(parentLogChannelId); + if (parentChildren == null) { + parentChildren = new ArrayList(); + this.childrenMap.put(parentLogChannelId, parentChildren); + } + parentChildren.add(logChannelId); + } } - if ( o1.getRegistrationDate() == null && o2.getRegistrationDate() == null ) { - return 0; + + this.lastModificationTime = new Date(); + loggingSource.setRegistrationDate(this.lastModificationTime); + + if ((this.maxSize > 0) && (this.map.size() > this.maxSize)) { + List all = new ArrayList(this.map.values()); + Collections.sort(all, new Comparator() { + @Override + public int compare(LoggingObjectInterface o1, LoggingObjectInterface o2) { + if ((o1 == null) && (o2 != null)) { + return -1; + } + if ((o1 != null) && (o2 == null)) { + return 1; + } + if ((o1 == null) && (o2 == null)) { + return 0; + } + if (o1.getRegistrationDate() == null && o2.getRegistrationDate() != null) { + return -1; + } + if (o1.getRegistrationDate() != null && o2.getRegistrationDate() == null) { + return 1; + } + if (o1.getRegistrationDate() == null && o2.getRegistrationDate() == null) { + return 0; + } + return (o1.getRegistrationDate().compareTo(o2.getRegistrationDate())); + } + }); + int cutCount = 
this.maxSize < 1000 ? this.maxSize : 1000; + for (int i = 0; i < cutCount; i++) { + LoggingObjectInterface toRemove = all.get(i); + this.map.remove(toRemove.getLogChannelId()); + } + removeOrphans(); } - return ( o1.getRegistrationDate().compareTo( o2.getRegistrationDate() ) ); - } - } ); - int cutCount = this.maxSize < 1000 ? this.maxSize : 1000; - for ( int i = 0; i < cutCount; i++ ) { - LoggingObjectInterface toRemove = all.get( i ); - this.map.remove( toRemove.getLogChannelId() ); + return logChannelId; } - removeOrphans(); - } - return logChannelId; } - } - - public LoggingObjectInterface findExistingLoggingSource( LoggingObjectInterface loggingObject ) { - LoggingObjectInterface found = null; - for ( LoggingObjectInterface verify : this.map.values() ) { - if ( loggingObject.equals( verify ) ) { - found = verify; - break; - } + + public LoggingObjectInterface findExistingLoggingSource(LoggingObjectInterface loggingObject) { + LoggingObjectInterface found = null; + for (LoggingObjectInterface verify : this.map.values()) { + if (loggingObject.equals(verify)) { + found = verify; + break; + } + } + return found; } - return found; - } - public LoggingObjectInterface getLoggingObject( String logChannelId ) { - return this.map.get( logChannelId ); - } + public LoggingObjectInterface getLoggingObject(String logChannelId) { + return this.map.get(logChannelId); + } - public Map getMap() { - return this.map; - } + public Map getMap() { + return this.map; + } - public List getLogChannelChildren( String parentLogChannelId ) { - if ( parentLogChannelId == null ) { - return null; + public List getLogChannelChildren(String parentLogChannelId) { + if (parentLogChannelId == null) { + return null; + } + List list = getLogChannelChildren(new ArrayList(), parentLogChannelId); + list.add(parentLogChannelId); + return list; } - List list = getLogChannelChildren( new ArrayList(), parentLogChannelId ); - list.add( parentLogChannelId ); - return list; - } - - private List getLogChannelChildren( List children, String parentLogChannelId ) { - synchronized ( this.syncObject ) { - List list = this.childrenMap.get( parentLogChannelId ); - if ( list == null ) { - // Don't do anything, just return the input. + + private List getLogChannelChildren(List children, String parentLogChannelId) { + synchronized (this.syncObject) { + List list = this.childrenMap.get(parentLogChannelId); + if (list == null) { + // Don't do anything, just return the input. + return children; + } + + Iterator kids = list.iterator(); + while (kids.hasNext()) { + String logChannelId = kids.next(); + + // Add the children recursively + getLogChannelChildren(children, logChannelId); + + // Also add the current parent + children.add(logChannelId); + } + } + return children; - } + } - Iterator kids = list.iterator(); - while ( kids.hasNext() ) { - String logChannelId = kids.next(); + public Date getLastModificationTime() { + return this.lastModificationTime; + } - // Add the children recursively - getLogChannelChildren( children, logChannelId ); + public String dump(boolean includeGeneral) { + StringBuffer out = new StringBuffer(50000); + for (LoggingObjectInterface o : this.map.values()) { + if ((includeGeneral) || (!o.getObjectType().equals(LoggingObjectType.GENERAL))) { + out.append(o.getContainerObjectId()); + out.append("\t"); + out.append(o.getLogChannelId()); + out.append("\t"); + out.append(o.getObjectType().name()); + out.append("\t"); + out.append(o.getObjectName()); + out.append("\t"); + out.append(o.getParent() != null ? 
o.getParent().getLogChannelId() : "-"); + out.append("\t"); + out.append(o.getParent() != null ? o.getParent().getObjectType().name() : "-"); + out.append("\t"); + out.append(o.getParent() != null ? o.getParent().getObjectName() : "-"); + out.append("\n"); + } + } + return out.toString(); + } - // Also add the current parent - children.add( logChannelId ); - } + /** + * For junit testing purposes + * + * @return ro items map + */ + Map dumpItems() { + return Collections.unmodifiableMap(this.map); } - return children; - } - - public Date getLastModificationTime() { - return this.lastModificationTime; - } - - public String dump( boolean includeGeneral ) { - StringBuffer out = new StringBuffer( 50000 ); - for ( LoggingObjectInterface o : this.map.values() ) { - if ( ( includeGeneral ) || ( !o.getObjectType().equals( LoggingObjectType.GENERAL ) ) ) { - out.append( o.getContainerObjectId() ); - out.append( "\t" ); - out.append( o.getLogChannelId() ); - out.append( "\t" ); - out.append( o.getObjectType().name() ); - out.append( "\t" ); - out.append( o.getObjectName() ); - out.append( "\t" ); - out.append( o.getParent() != null ? o.getParent().getLogChannelId() : "-" ); - out.append( "\t" ); - out.append( o.getParent() != null ? o.getParent().getObjectType().name() : "-" ); - out.append( "\t" ); - out.append( o.getParent() != null ? o.getParent().getObjectName() : "-" ); - out.append( "\n" ); - } + /** + * For junit testing purposes + * + * @return ro parent-child relations map + */ + Map> dumpChildren() { + return Collections.unmodifiableMap(this.childrenMap); } - return out.toString(); - } - - /** - * For junit testing purposes - * @return ro items map - */ - Map dumpItems() { - return Collections.unmodifiableMap( this.map ); - } - - /** - * For junit testing purposes - * @return ro parent-child relations map - */ - Map> dumpChildren() { - return Collections.unmodifiableMap( this.childrenMap ); - } - - public void removeIncludingChildren( String logChannelId ) { - synchronized ( this.map ) { - List children = getLogChannelChildren( logChannelId ); - for ( String child : children ) { - this.map.remove( child ); - } - this.map.remove( logChannelId ); - removeOrphans(); + + public void removeIncludingChildren(String logChannelId) { + synchronized (this.map) { + List children = getLogChannelChildren(logChannelId); + for (String child : children) { + this.map.remove(child); + } + this.map.remove(logChannelId); + removeOrphans(); + } } - } - public void removeOrphans() { - // Remove all orphaned children - this.childrenMap.keySet().retainAll( this.map.keySet() ); - } + public void removeOrphans() { + // Remove all orphaned children + this.childrenMap.keySet().retainAll(this.map.keySet()); + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java index 760970e..f1978b1 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java @@ -23,34 +23,10 @@ package org.pentaho.di.job; -import java.net.URLEncoder; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Timer; -import java.util.TimerTask; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import 
java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - import org.apache.commons.vfs2.FileName; import org.apache.commons.vfs2.FileObject; import org.pentaho.di.cluster.SlaveServer; -import org.pentaho.di.core.Const; -import org.pentaho.di.core.ExecutorInterface; -import org.pentaho.di.core.ExtensionDataInterface; -import org.pentaho.di.core.KettleEnvironment; -import org.pentaho.di.core.Result; -import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.*; import org.pentaho.di.core.database.Database; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleDatabaseException; @@ -60,22 +36,7 @@ import org.pentaho.di.core.extension.ExtensionPointHandler; import org.pentaho.di.core.extension.KettleExtensionPoint; import org.pentaho.di.core.gui.JobTracker; -import org.pentaho.di.core.logging.ChannelLogTable; -import org.pentaho.di.core.logging.DefaultLogLevel; -import org.pentaho.di.core.logging.HasLogChannelInterface; -import org.pentaho.di.core.logging.JobEntryLogTable; -import org.pentaho.di.core.logging.JobLogTable; -import org.pentaho.di.core.logging.KettleLogStore; -import org.pentaho.di.core.logging.LogChannel; -import org.pentaho.di.core.logging.LogChannelInterface; -import org.pentaho.di.core.logging.LogLevel; -import org.pentaho.di.core.logging.LogStatus; -import org.pentaho.di.core.logging.LoggingBuffer; -import org.pentaho.di.core.logging.LoggingHierarchy; -import org.pentaho.di.core.logging.LoggingObjectInterface; -import org.pentaho.di.core.logging.LoggingObjectType; -import org.pentaho.di.core.logging.LoggingRegistry; -import org.pentaho.di.core.logging.Metrics; +import org.pentaho.di.core.logging.*; import org.pentaho.di.core.parameters.DuplicateParamException; import org.pentaho.di.core.parameters.NamedParams; import org.pentaho.di.core.parameters.NamedParamsDefault; @@ -99,13 +60,15 @@ import org.pentaho.di.resource.ResourceUtil; import org.pentaho.di.resource.TopLevelResource; import org.pentaho.di.trans.Trans; -import org.pentaho.di.www.AddExportServlet; -import org.pentaho.di.www.RegisterJobServlet; -import org.pentaho.di.www.SocketRepository; -import org.pentaho.di.www.StartJobServlet; -import org.pentaho.di.www.WebResult; +import org.pentaho.di.www.*; import org.pentaho.metastore.api.IMetaStore; +import java.net.URLEncoder; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + /** * This class executes a job as defined by a JobMeta object. *

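For orientation before the large Job.java hunks that follow: a minimal, illustrative sketch of how callers typically drive this class, using only the public API visible in this patch (the Job(Repository, JobMeta) constructor, start(), waitUntilFinished(), getErrors()). This is not part of the patch itself; the class name and the .kjb path are placeholders, and KettleEnvironment.init() is assumed to be available in this Kettle version.

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.job.Job;
import org.pentaho.di.job.JobMeta;

public class RunJobSketch {
    public static void main(String[] args) throws Exception {
        KettleEnvironment.init();                                   // bootstrap plugins, logging, variables
        JobMeta jobMeta = new JobMeta("/path/to/sample.kjb", null); // placeholder job file, no repository
        Job job = new Job(null, jobMeta);                           // constructor kept by this patch
        job.start();                                                // Job extends Thread; run() below does the work
        job.waitUntilFinished();                                    // block until execution completes
        if (job.getErrors() > 0) {
            System.err.println("Job finished with " + job.getErrors() + " error(s)");
        }
    }
}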
@@ -115,2382 +78,2341 @@ * * @author Matt Casters * @since 07-apr-2003 - * */ public class Job extends Thread implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface, - ExecutorInterface, ExtensionDataInterface { - private static Class PKG = Job.class; // for i18n purposes, needed by Translator2!! + ExecutorInterface, ExtensionDataInterface { + private static Class PKG = Job.class; // for i18n purposes, needed by Translator2!! - public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml"; + public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml"; - private LogChannelInterface log; + private LogChannelInterface log; - private LogLevel logLevel = DefaultLogLevel.getLogLevel(); + private LogLevel logLevel = DefaultLogLevel.getLogLevel(); - private String containerObjectId; + private String containerObjectId; - private JobMeta jobMeta; + private JobMeta jobMeta; - private int logCommitSize = 10; + private int logCommitSize = 10; - private Repository rep; + private Repository rep; - private AtomicInteger errors; + private AtomicInteger errors; - private VariableSpace variables = new Variables(); + private VariableSpace variables = new Variables(); - /** - * The job that's launching this (sub-) job. This gives us access to the whole chain, including the parent variables, - * etc. - */ - protected Job parentJob; + /** + * The job that's launching this (sub-) job. This gives us access to the whole chain, including the parent variables, + * etc. + */ + protected Job parentJob; - /** - * The parent transformation - */ - protected Trans parentTrans; + /** + * The parent transformation + */ + protected Trans parentTrans; - /** The parent logging interface to reference */ - private LoggingObjectInterface parentLoggingObject; + /** + * The parent logging interface to reference + */ + private LoggingObjectInterface parentLoggingObject; - /** - * Keep a list of the job entries that were executed. org.pentaho.di.core.logging.CentralLogStore.getInstance() - */ - private JobTracker jobTracker; + /** + * Keep a list of the job entries that were executed. org.pentaho.di.core.logging.CentralLogStore.getInstance() + */ + private JobTracker jobTracker; - /** - * A flat list of results in THIS job, in the order of execution of job entries - */ - private final LinkedList jobEntryResults = new LinkedList(); + /** + * A flat list of results in THIS job, in the order of execution of job entries + */ + private final LinkedList jobEntryResults = new LinkedList(); - private Date startDate, endDate, currentDate, logDate, depDate; + private Date startDate, endDate, currentDate, logDate, depDate; - private AtomicBoolean active; + private AtomicBoolean active; - private AtomicBoolean stopped; + private AtomicBoolean stopped; - private long batchId; + private long batchId; - /** - * This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the job's batch - * id - */ - private long passedBatchId; + /** + * This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the job's batch + * id + */ + private long passedBatchId; - /** - * The rows that were passed onto this job by a previous transformation. These rows are passed onto the first job - * entry in this job (on the result object) - */ - private List sourceRows; + /** + * The rows that were passed onto this job by a previous transformation. 
These rows are passed onto the first job + * entry in this job (on the result object) + */ + private List sourceRows; - /** - * The result of the job, after execution. - */ - private Result result; + /** + * The result of the job, after execution. + */ + private Result result; - private AtomicBoolean initialized; + private AtomicBoolean initialized; - private boolean interactive; + private boolean interactive; - private List jobListeners; + private List jobListeners; - private List jobEntryListeners; + private List jobEntryListeners; - private List delegationListeners; + private List delegationListeners; - private Map activeJobEntryTransformations; + private Map activeJobEntryTransformations; - private Map activeJobEntryJobs; + private Map activeJobEntryJobs; - /** - * Parameters of the job. - */ - private NamedParams namedParams = new NamedParamsDefault(); + /** + * Parameters of the job. + */ + private NamedParams namedParams = new NamedParamsDefault(); - private AtomicBoolean finished; + private AtomicBoolean finished; - private SocketRepository socketRepository; + private SocketRepository socketRepository; - private int maxJobEntriesLogged; + private int maxJobEntriesLogged; - private JobEntryCopy startJobEntryCopy; - private Result startJobEntryResult; + private JobEntryCopy startJobEntryCopy; + private Result startJobEntryResult; - private String executingServer; + private String executingServer; - private String executingUser; + private String executingUser; - private String transactionId; + private String transactionId; - private Map extensionDataMap; + private Map extensionDataMap; - /** The command line arguments for the job. */ - protected String[] arguments; + /** + * The command line arguments for the job. + */ + protected String[] arguments; - /** - * Instantiates a new job. - * - * @param name - * the name - * @param file - * the file - * @param args - * the args - */ - public Job( String name, String file, String[] args ) { - this(); - jobMeta = new JobMeta(); + /** + * Instantiates a new job. + * + * @param name the name + * @param file the file + * @param args the args + */ + public Job(String name, String file, String[] args) { + this(); + jobMeta = new JobMeta(); + + if (name != null) { + setName(name + " (" + super.getName() + ")"); + } + jobMeta.setName(name); + jobMeta.setFilename(file); + this.arguments = args; - if ( name != null ) { - setName( name + " (" + super.getName() + ")" ); + init(); + this.log = new LogChannel(this); } - jobMeta.setName( name ); - jobMeta.setFilename( file ); - this.arguments = args; - init(); - this.log = new LogChannel( this ); - } + /** + * Initializes the Job. + */ + public void init() { + jobListeners = new ArrayList(); + jobEntryListeners = new ArrayList(); + delegationListeners = new ArrayList(); - /** - * Initializes the Job. 
- */ - public void init() { - jobListeners = new ArrayList(); - jobEntryListeners = new ArrayList(); - delegationListeners = new ArrayList(); - - activeJobEntryTransformations = new HashMap(); - activeJobEntryJobs = new HashMap(); - - extensionDataMap = new HashMap(); - - active = new AtomicBoolean( false ); - stopped = new AtomicBoolean( false ); - jobTracker = new JobTracker( jobMeta ); - synchronized ( jobEntryResults ) { - jobEntryResults.clear(); - } - initialized = new AtomicBoolean( false ); - finished = new AtomicBoolean( false ); - errors = new AtomicInteger( 0 ); - batchId = -1; - passedBatchId = -1; - maxJobEntriesLogged = Const.toInt( EnvUtil.getSystemProperty( Const.KETTLE_MAX_JOB_ENTRIES_LOGGED ), 1000 ); - - result = null; - startJobEntryCopy = null; - startJobEntryResult = null; - - this.setDefaultLogCommitSize(); - } - - /** - * Sets the default log commit size. - */ - private void setDefaultLogCommitSize() { - String propLogCommitSize = this.getVariable( "pentaho.log.commit.size" ); - if ( propLogCommitSize != null ) { - // override the logCommit variable - try { - logCommitSize = Integer.parseInt( propLogCommitSize ); - } catch ( Exception ignored ) { - logCommitSize = 10; // ignore parsing error and default to 10 - } - } - } - - /** - * Instantiates a new job. - * - * @param repository - * the repository - * @param jobMeta - * the job meta - */ - public Job( Repository repository, JobMeta jobMeta ) { - this( repository, jobMeta, null ); - } + activeJobEntryTransformations = new HashMap(); + activeJobEntryJobs = new HashMap(); - /** - * Instantiates a new job. - * - * @param repository - * the repository - * @param jobMeta - * the job meta - * @param parentLogging - * the parent logging - */ - public Job( Repository repository, JobMeta jobMeta, LoggingObjectInterface parentLogging ) { - this.rep = repository; - this.jobMeta = jobMeta; - this.parentLoggingObject = parentLogging; + extensionDataMap = new HashMap(); - init(); + active = new AtomicBoolean(false); + stopped = new AtomicBoolean(false); + jobTracker = new JobTracker(jobMeta); + synchronized (jobEntryResults) { + jobEntryResults.clear(); + } + initialized = new AtomicBoolean(false); + finished = new AtomicBoolean(false); + errors = new AtomicInteger(0); + batchId = -1; + passedBatchId = -1; + maxJobEntriesLogged = Const.toInt(EnvUtil.getSystemProperty(Const.KETTLE_MAX_JOB_ENTRIES_LOGGED), 1000); + + result = null; + startJobEntryCopy = null; + startJobEntryResult = null; + + this.setDefaultLogCommitSize(); + } - jobTracker = new JobTracker( jobMeta ); + /** + * Sets the default log commit size. + */ + private void setDefaultLogCommitSize() { + String propLogCommitSize = this.getVariable("pentaho.log.commit.size"); + if (propLogCommitSize != null) { + // override the logCommit variable + try { + logCommitSize = Integer.parseInt(propLogCommitSize); + } catch (Exception ignored) { + logCommitSize = 10; // ignore parsing error and default to 10 + } + } + } - this.log = new LogChannel( this, parentLogging ); - this.logLevel = log.getLogLevel(); - this.containerObjectId = log.getContainerObjectId(); - } + /** + * Instantiates a new job. 
+ * + * @param repository the repository + * @param jobMeta the job meta + */ + public Job(Repository repository, JobMeta jobMeta) { + this(repository, jobMeta, null); + } - /** - * Empty constructor, for Class.newInstance() - */ - public Job() { - init(); - this.log = new LogChannel( this ); - this.logLevel = log.getLogLevel(); - } - - /** - * Gets the name property of the JobMeta property. - * - * @return String name for the JobMeta - */ - @Override - public String toString() { - if ( jobMeta == null || Const.isEmpty( jobMeta.getName() ) ) { - return getName(); - } else { - return jobMeta.getName(); + /** + * Instantiates a new job. + * + * @param repository the repository + * @param jobMeta the job meta + * @param parentLogging the parent logging + */ + public Job(Repository repository, JobMeta jobMeta, LoggingObjectInterface parentLogging) { + this.rep = repository; + this.jobMeta = jobMeta; + this.parentLoggingObject = parentLogging; + + init(); + + jobTracker = new JobTracker(jobMeta); + + this.log = new LogChannel(this, parentLogging); + this.logLevel = log.getLogLevel(); + this.containerObjectId = log.getContainerObjectId(); } - } - /** - * Creates the job with new class loader. - * - * @return the job - * @throws KettleException - * the kettle exception - */ - public static final Job createJobWithNewClassLoader() throws KettleException { - try { - // Load the class. - Class jobClass = Const.createNewClassLoader().loadClass( Job.class.getName() ); + /** + * Empty constructor, for Class.newInstance() + */ + public Job() { + init(); + this.log = new LogChannel(this); + this.logLevel = log.getLogLevel(); + } - // create the class - // Try to instantiate this one... - Job job = (Job) jobClass.newInstance(); + /** + * Gets the name property of the JobMeta property. + * + * @return String name for the JobMeta + */ + @Override + public String toString() { + if (jobMeta == null || Const.isEmpty(jobMeta.getName())) { + return getName(); + } else { + return jobMeta.getName(); + } + } - // Done! - return job; - } catch ( Exception e ) { - String message = BaseMessages.getString( PKG, "Job.Log.ErrorAllocatingNewJob", e.toString() ); - throw new KettleException( message, e ); + /** + * Creates the job with new class loader. + * + * @return the job + * @throws KettleException the kettle exception + */ + public static final Job createJobWithNewClassLoader() throws KettleException { + try { + // Load the class. + Class jobClass = Const.createNewClassLoader().loadClass(Job.class.getName()); + + // create the class + // Try to instantiate this one... + Job job = (Job) jobClass.newInstance(); + + // Done! + return job; + } catch (Exception e) { + String message = BaseMessages.getString(PKG, "Job.Log.ErrorAllocatingNewJob", e.toString()); + throw new KettleException(message, e); + } } - } - /** - * Gets the jobname. - * - * @return the jobname - */ - public String getJobname() { - if ( jobMeta == null ) { - return null; + /** + * Gets the jobname. + * + * @return the jobname + */ + public String getJobname() { + if (jobMeta == null) { + return null; + } + + return jobMeta.getName(); } - return jobMeta.getName(); - } + /** + * Sets the repository. + * + * @param rep the new repository + */ + public void setRepository(Repository rep) { + this.rep = rep; + } - /** - * Sets the repository. 
- * - * @param rep - * the new repository - */ - public void setRepository( Repository rep ) { - this.rep = rep; - } + /** + * Threads main loop: called by Thread.start(); + */ + public void run() { - /** - * Threads main loop: called by Thread.start(); - */ - public void run() { - - ExecutorService heartbeat = null; // this job's heartbeat scheduled executor - - try { - stopped = new AtomicBoolean( false ); - finished = new AtomicBoolean( false ); - initialized = new AtomicBoolean( true ); - - // Create a new variable name space as we want jobs to have their own set of variables. - // initialize from parentJob or null - // - variables.initializeVariablesFrom( parentJob ); - setInternalKettleVariables( variables ); - copyParametersFrom( jobMeta ); - activateParameters(); - - // Run the job - // - fireJobStartListeners(); - - heartbeat = startHeartbeat( getHeartbeatIntervalInSeconds() ); - - result = execute(); - } catch ( Throwable je ) { - log.logError( BaseMessages.getString( PKG, "Job.Log.ErrorExecJob", je.getMessage() ), je ); - // log.logError(Const.getStackTracker(je)); - // - // we don't have result object because execute() threw a curve-ball. - // So we create a new error object. - // - result = new Result(); - result.setNrErrors( 1L ); - result.setResult( false ); - addErrors( 1 ); // This can be before actual execution - - emergencyWriteJobTracker( result ); - - active.set( false ); - finished.set( true ); - stopped.set( false ); - } finally { - try { - - shutdownHeartbeat( heartbeat ); - - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobFinish.id, this ); - - fireJobFinishListeners(); - } catch ( KettleException e ) { - result.setNrErrors( 1 ); - result.setResult( false ); - log.logError( BaseMessages.getString( PKG, "Job.Log.ErrorExecJob", e.getMessage() ), e ); - - emergencyWriteJobTracker( result ); - } - } - } - - private void emergencyWriteJobTracker( Result res ) { - JobEntryResult jerFinalResult = - new JobEntryResult( res, this.getLogChannelId(), BaseMessages.getString( PKG, "Job.Comment.JobFinished" ), null, - null, 0, null ); - JobTracker finalTrack = new JobTracker( this.getJobMeta(), jerFinalResult ); - // jobTracker is up to date too. - this.jobTracker.addJobTracker( finalTrack ); - } - - /** - * Execute a job without previous results. This is a job entry point (not recursive)
- *
- * - * @return the result of the execution - * - * @throws KettleException - */ - private Result execute() throws KettleException { - try { - log.snap( Metrics.METRIC_JOB_START ); + ExecutorService heartbeat = null; // this job's heartbeat scheduled executor - finished.set( false ); - stopped.set( false ); - KettleEnvironment.setExecutionInformation( this, rep ); + try { + stopped = new AtomicBoolean(false); + finished = new AtomicBoolean(false); + initialized = new AtomicBoolean(true); - log.logMinimal( BaseMessages.getString( PKG, "Job.Comment.JobStarted" ) ); + // Create a new variable name space as we want jobs to have their own set of variables. + // initialize from parentJob or null + // + variables.initializeVariablesFrom(parentJob); + setInternalKettleVariables(variables); + copyParametersFrom(jobMeta); + activateParameters(); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobStart.id, this ); + // Run the job + // + fireJobStartListeners(); - // Start the tracking... - JobEntryResult jerStart = - new JobEntryResult( null, null, BaseMessages.getString( PKG, "Job.Comment.JobStarted" ), BaseMessages - .getString( PKG, "Job.Reason.Started" ), null, 0, null ); - jobTracker.addJobTracker( new JobTracker( jobMeta, jerStart ) ); + heartbeat = startHeartbeat(getHeartbeatIntervalInSeconds()); - active.set( true ); + result = execute(); + } catch (Throwable je) { + log.logError(BaseMessages.getString(PKG, "Job.Log.ErrorExecJob", je.getMessage()), je); + // log.logError(Const.getStackTracker(je)); + // + // we don't have result object because execute() threw a curve-ball. + // So we create a new error object. + // + result = new Result(); + result.setNrErrors(1L); + result.setResult(false); + addErrors(1); // This can be before actual execution - // Where do we start? - JobEntryCopy startpoint; + emergencyWriteJobTracker(result); - // synchronize this to a parent job if needed. - // - Object syncObject = this; - if ( parentJob != null ) { - syncObject = parentJob; // parallel execution in a job - } + active.set(false); + finished.set(true); + stopped.set(false); + } finally { + try { - synchronized ( syncObject ) { - beginProcessing(); - } + shutdownHeartbeat(heartbeat); - Result res = null; + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobFinish.id, this); - if ( startJobEntryCopy == null ) { - startpoint = jobMeta.findJobEntry( JobMeta.STRING_SPECIAL_START, 0, false ); - } else { - startpoint = startJobEntryCopy; - res = startJobEntryResult; - } - if ( startpoint == null ) { - throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.CounldNotFindStartingPoint" ) ); - } + fireJobFinishListeners(); + } catch (KettleException e) { + result.setNrErrors(1); + result.setResult(false); + log.logError(BaseMessages.getString(PKG, "Job.Log.ErrorExecJob", e.getMessage()), e); - JobEntryResult jerEnd = null; + emergencyWriteJobTracker(result); + } + } + } - if ( startpoint.isStart() ) { - // Perform optional looping in the special Start job entry... - // - // long iteration = 0; + private void emergencyWriteJobTracker(Result res) { + JobEntryResult jerFinalResult = + new JobEntryResult(res, this.getLogChannelId(), BaseMessages.getString(PKG, "Job.Comment.JobFinished"), null, + null, 0, null); + JobTracker finalTrack = new JobTracker(this.getJobMeta(), jerFinalResult); + // jobTracker is up to date too. 
+ this.jobTracker.addJobTracker(finalTrack); + } - boolean isFirst = true; - JobEntrySpecial jes = (JobEntrySpecial) startpoint.getEntry(); - while ( ( jes.isRepeat() || isFirst ) && !isStopped() ) { - isFirst = false; - res = execute( 0, null, startpoint, null, BaseMessages.getString( PKG, "Job.Reason.Started" ) ); + /** + * Execute a job without previous results. This is a job entry point (not recursive)
+ *
+ * + * @return the result of the execution + * @throws KettleException + */ + private Result execute() throws KettleException { + try { + log.snap(Metrics.METRIC_JOB_START); - // - // if (iteration > 0 && (iteration % 500) == 0) { - // System.out.println("other 500 iterations: " + iteration); - // } + finished.set(false); + stopped.set(false); + KettleEnvironment.setExecutionInformation(this, rep); - // iteration++; - // - } - jerEnd = - new JobEntryResult( res, jes.getLogChannelId(), BaseMessages.getString( PKG, "Job.Comment.JobFinished" ), - BaseMessages.getString( PKG, "Job.Reason.Finished" ), null, 0, null ); - } else { - res = execute( 0, res, startpoint, null, BaseMessages.getString( PKG, "Job.Reason.Started" ) ); - jerEnd = - new JobEntryResult( res, startpoint.getEntry().getLogChannel().getLogChannelId(), BaseMessages.getString( - PKG, "Job.Comment.JobFinished" ), BaseMessages.getString( PKG, "Job.Reason.Finished" ), null, 0, null ); - } - // Save this result... - jobTracker.addJobTracker( new JobTracker( jobMeta, jerEnd ) ); - log.logMinimal( BaseMessages.getString( PKG, "Job.Comment.JobFinished" ) ); - - active.set( false ); - finished.set( true ); - - return res; - } finally { - log.snap( Metrics.METRIC_JOB_STOP ); - } - } - - /** - * Execute a job with previous results passed in.
- *
- * Execute called by JobEntryJob: don't clear the jobEntryResults. - * - * @param nr - * The job entry number - * @param result - * the result of the previous execution - * @return Result of the job execution - * @throws KettleJobException - */ - public Result execute( int nr, Result result ) throws KettleException { - finished.set( false ); - active.set( true ); - initialized.set( true ); - KettleEnvironment.setExecutionInformation( this, rep ); + log.logMinimal(BaseMessages.getString(PKG, "Job.Comment.JobStarted")); - // Where do we start? - JobEntryCopy startpoint; + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobStart.id, this); - // Perhaps there is already a list of input rows available? - if ( getSourceRows() != null ) { - result.setRows( getSourceRows() ); - } + // Start the tracking... + JobEntryResult jerStart = + new JobEntryResult(null, null, BaseMessages.getString(PKG, "Job.Comment.JobStarted"), BaseMessages + .getString(PKG, "Job.Reason.Started"), null, 0, null); + jobTracker.addJobTracker(new JobTracker(jobMeta, jerStart)); - startpoint = jobMeta.findJobEntry( JobMeta.STRING_SPECIAL_START, 0, false ); - if ( startpoint == null ) { - throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.CounldNotFindStartingPoint" ) ); - } + active.set(true); - Result res = execute( nr, result, startpoint, null, BaseMessages.getString( PKG, "Job.Reason.StartOfJobentry" ) ); + // Where do we start? + JobEntryCopy startpoint; - active.set( false ); + // synchronize this to a parent job if needed. + // + Object syncObject = this; + if (parentJob != null) { + syncObject = parentJob; // parallel execution in a job + } - return res; - } + synchronized (syncObject) { + beginProcessing(); + } - /** - * Sets the finished flag. Then launch all the job listeners and call the jobFinished method for each.
- * - * @see JobListener#jobFinished(Job) - */ - public void fireJobFinishListeners() throws KettleException { - synchronized ( jobListeners ) { - for ( JobListener jobListener : jobListeners ) { - jobListener.jobFinished( this ); - } - } - } + Result res = null; - /** - * Call all the jobStarted method for each listener.
- * - * @see JobListener#jobStarted(Job) - */ - public void fireJobStartListeners() throws KettleException { - synchronized ( jobListeners ) { - for ( JobListener jobListener : jobListeners ) { - jobListener.jobStarted( this ); - } - } - } + if (startJobEntryCopy == null) { + startpoint = jobMeta.findJobEntry(JobMeta.STRING_SPECIAL_START, 0, false); + } else { + startpoint = startJobEntryCopy; + res = startJobEntryResult; + } + if (startpoint == null) { + throw new KettleJobException(BaseMessages.getString(PKG, "Job.Log.CounldNotFindStartingPoint")); + } - /** - * Execute a job entry recursively and move to the next job entry automatically.
- * Uses a back-tracking algorithm.
- * - * @param nr - * @param prev_result - * @param jobEntryCopy - * @param previous - * @param reason - * @return - * @throws KettleException - */ - private Result execute( final int nr, Result prev_result, final JobEntryCopy jobEntryCopy, JobEntryCopy previous, - String reason ) throws KettleException { - Result res = null; + JobEntryResult jerEnd = null; - if ( stopped.get() ) { - res = new Result( nr ); - res.stopped = true; - return res; + if (startpoint.isStart()) { + // Perform optional looping in the special Start job entry... + // + // long iteration = 0; + + boolean isFirst = true; + JobEntrySpecial jes = (JobEntrySpecial) startpoint.getEntry(); + while ((jes.isRepeat() || isFirst) && !isStopped()) { + isFirst = false; + res = execute(0, null, startpoint, null, BaseMessages.getString(PKG, "Job.Reason.Started")); + + // + // if (iteration > 0 && (iteration % 500) == 0) { + // System.out.println("other 500 iterations: " + iteration); + // } + + // iteration++; + // + } + jerEnd = + new JobEntryResult(res, jes.getLogChannelId(), BaseMessages.getString(PKG, "Job.Comment.JobFinished"), + BaseMessages.getString(PKG, "Job.Reason.Finished"), null, 0, null); + } else { + res = execute(0, res, startpoint, null, BaseMessages.getString(PKG, "Job.Reason.Started")); + jerEnd = + new JobEntryResult(res, startpoint.getEntry().getLogChannel().getLogChannelId(), BaseMessages.getString( + PKG, "Job.Comment.JobFinished"), BaseMessages.getString(PKG, "Job.Reason.Finished"), null, 0, null); + } + // Save this result... + jobTracker.addJobTracker(new JobTracker(jobMeta, jerEnd)); + log.logMinimal(BaseMessages.getString(PKG, "Job.Comment.JobFinished")); + + active.set(false); + finished.set(true); + + return res; + } finally { + log.snap(Metrics.METRIC_JOB_STOP); + } } - // if we didn't have a previous result, create one, otherwise, copy the content... - // - final Result newResult; - Result prevResult = null; - if ( prev_result != null ) { - prevResult = prev_result.clone(); - } else { - prevResult = new Result(); - } - - JobExecutionExtension extension = new JobExecutionExtension( this, prevResult, jobEntryCopy, true ); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobBeforeJobEntryExecution.id, extension ); - - if ( extension.result != null ) { - prevResult = extension.result; - } - - if ( !extension.executeEntry ) { - newResult = prevResult; - } else { - if ( log.isDetailed() ) { - log.logDetailed( "exec(" + nr + ", " + ( prev_result != null ? prev_result.getNrErrors() : 0 ) + ", " - + ( jobEntryCopy != null ? jobEntryCopy.toString() : "null" ) + ")" ); - } - - // Which entry is next? - JobEntryInterface jobEntryInterface = jobEntryCopy.getEntry(); - jobEntryInterface.getLogChannel().setLogLevel( logLevel ); - - // Track the fact that we are going to launch the next job entry... - JobEntryResult jerBefore = - new JobEntryResult( null, null, BaseMessages.getString( PKG, "Job.Comment.JobStarted" ), reason, jobEntryCopy - .getName(), jobEntryCopy.getNr(), environmentSubstitute( jobEntryCopy.getEntry().getFilename() ) ); - jobTracker.addJobTracker( new JobTracker( jobMeta, jerBefore ) ); - - ClassLoader cl = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader( jobEntryInterface.getClass().getClassLoader() ); - // Execute this entry... 
- JobEntryInterface cloneJei = (JobEntryInterface) jobEntryInterface.clone(); - ( (VariableSpace) cloneJei ).copyVariablesFrom( this ); - cloneJei.setRepository( rep ); - if ( rep != null ) { - cloneJei.setMetaStore( rep.getMetaStore() ); - } - cloneJei.setParentJob( this ); - final long start = System.currentTimeMillis(); - - cloneJei.getLogChannel().logDetailed( "Starting job entry" ); - for ( JobEntryListener jobEntryListener : jobEntryListeners ) { - jobEntryListener.beforeExecution( this, jobEntryCopy, cloneJei ); - } - if ( interactive ) { - if ( jobEntryCopy.isTransformation() ) { - getActiveJobEntryTransformations().put( jobEntryCopy, (JobEntryTrans) cloneJei ); + /** + * Execute a job with previous results passed in.
+ *
+ * Execute called by JobEntryJob: don't clear the jobEntryResults. + * + * @param nr The job entry number + * @param result the result of the previous execution + * @return Result of the job execution + * @throws KettleJobException + */ + public Result execute(int nr, Result result) throws KettleException { + finished.set(false); + active.set(true); + initialized.set(true); + KettleEnvironment.setExecutionInformation(this, rep); + + // Where do we start? + JobEntryCopy startpoint; + + // Perhaps there is already a list of input rows available? + if (getSourceRows() != null) { + result.setRows(getSourceRows()); } - if ( jobEntryCopy.isJob() ) { - getActiveJobEntryJobs().put( jobEntryCopy, (JobEntryJob) cloneJei ); + + startpoint = jobMeta.findJobEntry(JobMeta.STRING_SPECIAL_START, 0, false); + if (startpoint == null) { + throw new KettleJobException(BaseMessages.getString(PKG, "Job.Log.CounldNotFindStartingPoint")); } - } - log.snap( Metrics.METRIC_JOBENTRY_START, cloneJei.toString() ); - newResult = cloneJei.execute( prevResult, nr ); - log.snap( Metrics.METRIC_JOBENTRY_STOP, cloneJei.toString() ); - - final long end = System.currentTimeMillis(); - if ( interactive ) { - if ( jobEntryCopy.isTransformation() ) { - getActiveJobEntryTransformations().remove( jobEntryCopy ); + + Result res = execute(nr, result, startpoint, null, BaseMessages.getString(PKG, "Job.Reason.StartOfJobentry")); + + active.set(false); + + return res; + } + + /** + * Sets the finished flag. Then launch all the job listeners and call the jobFinished method for each.
+ * + * @see JobListener#jobFinished(Job) + */ + public void fireJobFinishListeners() throws KettleException { + synchronized (jobListeners) { + for (JobListener jobListener : jobListeners) { + jobListener.jobFinished(this); + } } - if ( jobEntryCopy.isJob() ) { - getActiveJobEntryJobs().remove( jobEntryCopy ); + } + + /** + * Call all the jobStarted method for each listener.
+ * + * @see JobListener#jobStarted(Job) + */ + public void fireJobStartListeners() throws KettleException { + synchronized (jobListeners) { + for (JobListener jobListener : jobListeners) { + jobListener.jobStarted(this); + } } - } + } - if ( cloneJei instanceof JobEntryTrans ) { - String throughput = newResult.getReadWriteThroughput( (int) ( ( end - start ) / 1000 ) ); - if ( throughput != null ) { - log.logMinimal( throughput ); + /** + * Execute a job entry recursively and move to the next job entry automatically.
+ * Uses a back-tracking algorithm.
+ * + * @param nr + * @param prev_result + * @param jobEntryCopy + * @param previous + * @param reason + * @return + * @throws KettleException + */ + private Result execute(final int nr, Result prev_result, final JobEntryCopy jobEntryCopy, JobEntryCopy previous, + String reason) throws KettleException { + Result res = null; + + if (stopped.get()) { + res = new Result(nr); + res.stopped = true; + return res; } - } - for ( JobEntryListener jobEntryListener : jobEntryListeners ) { - jobEntryListener.afterExecution( this, jobEntryCopy, cloneJei, newResult ); - } - - Thread.currentThread().setContextClassLoader( cl ); - addErrors( (int) newResult.getNrErrors() ); - - // Also capture the logging text after the execution... - // - LoggingBuffer loggingBuffer = KettleLogStore.getAppender(); - StringBuffer logTextBuffer = loggingBuffer.getBuffer( cloneJei.getLogChannel().getLogChannelId(), false ); - newResult.setLogText( logTextBuffer.toString() + newResult.getLogText() ); - - // Save this result as well... - // - JobEntryResult jerAfter = - new JobEntryResult( newResult, cloneJei.getLogChannel().getLogChannelId(), BaseMessages.getString( PKG, - "Job.Comment.JobFinished" ), null, jobEntryCopy.getName(), jobEntryCopy.getNr(), environmentSubstitute( - jobEntryCopy.getEntry().getFilename() ) ); - jobTracker.addJobTracker( new JobTracker( jobMeta, jerAfter ) ); - synchronized ( jobEntryResults ) { - jobEntryResults.add( jerAfter ); - - // Only keep the last X job entry results in memory + + // if we didn't have a previous result, create one, otherwise, copy the content... // - if ( maxJobEntriesLogged > 0 ) { - while ( jobEntryResults.size() > maxJobEntriesLogged ) { - // Remove the oldest. - jobEntryResults.removeFirst(); - } + final Result newResult; + Result prevResult = null; + if (prev_result != null) { + prevResult = prev_result.clone(); + } else { + prevResult = new Result(); } - } - } - extension = new JobExecutionExtension( this, prevResult, jobEntryCopy, extension.executeEntry ); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobAfterJobEntryExecution.id, extension ); + JobExecutionExtension extension = new JobExecutionExtension(this, prevResult, jobEntryCopy, true); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobBeforeJobEntryExecution.id, extension); - // Try all next job entries. - // - // Keep track of all the threads we fired in case of parallel execution... - // Keep track of the results of these executions too. - // - final List threads = new ArrayList(); - // next 2 lists is being modified concurrently so must be synchronized for this case. - final Queue threadResults = new ConcurrentLinkedQueue(); - final Queue threadExceptions = new ConcurrentLinkedQueue(); - final List threadEntries = new ArrayList(); + if (extension.result != null) { + prevResult = extension.result; + } - // Launch only those where the hop indicates true or false - // - int nrNext = jobMeta.findNrNextJobEntries( jobEntryCopy ); - for ( int i = 0; i < nrNext && !isStopped(); i++ ) { - // The next entry is... - final JobEntryCopy nextEntry = jobMeta.findNextJobEntry( jobEntryCopy, i ); - - // See if we need to execute this... - final JobHopMeta hi = jobMeta.findJobHop( jobEntryCopy, nextEntry ); - - // The next comment... 
- final String nextComment; - if ( hi.isUnconditional() ) { - nextComment = BaseMessages.getString( PKG, "Job.Comment.FollowedUnconditional" ); - } else { - if ( newResult.getResult() ) { - nextComment = BaseMessages.getString( PKG, "Job.Comment.FollowedSuccess" ); + if (!extension.executeEntry) { + newResult = prevResult; } else { - nextComment = BaseMessages.getString( PKG, "Job.Comment.FollowedFailure" ); - } - } - - // - // If the link is unconditional, execute the next job entry (entries). - // If the start point was an evaluation and the link color is correct: - // green or red, execute the next job entry... - // - if ( hi.isUnconditional() || ( jobEntryCopy.evaluates() && ( !( hi.getEvaluation() ^ newResult - .getResult() ) ) ) ) { - // Start this next step! - if ( log.isBasic() ) { - log.logBasic( BaseMessages.getString( PKG, "Job.Log.StartingEntry", nextEntry.getName() ) ); + if (log.isDetailed()) { + log.logDetailed("exec(" + nr + ", " + (prev_result != null ? prev_result.getNrErrors() : 0) + ", " + + (jobEntryCopy != null ? jobEntryCopy.toString() : "null") + ")"); + } + + // Which entry is next? + JobEntryInterface jobEntryInterface = jobEntryCopy.getEntry(); + jobEntryInterface.getLogChannel().setLogLevel(logLevel); + + // Track the fact that we are going to launch the next job entry... + JobEntryResult jerBefore = + new JobEntryResult(null, null, BaseMessages.getString(PKG, "Job.Comment.JobStarted"), reason, jobEntryCopy + .getName(), jobEntryCopy.getNr(), environmentSubstitute(jobEntryCopy.getEntry().getFilename())); + jobTracker.addJobTracker(new JobTracker(jobMeta, jerBefore)); + + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(jobEntryInterface.getClass().getClassLoader()); + // Execute this entry... + JobEntryInterface cloneJei = (JobEntryInterface) jobEntryInterface.clone(); + ((VariableSpace) cloneJei).copyVariablesFrom(this); + cloneJei.setRepository(rep); + if (rep != null) { + cloneJei.setMetaStore(rep.getMetaStore()); + } + cloneJei.setParentJob(this); + final long start = System.currentTimeMillis(); + + cloneJei.getLogChannel().logDetailed("Starting job entry"); + for (JobEntryListener jobEntryListener : jobEntryListeners) { + jobEntryListener.beforeExecution(this, jobEntryCopy, cloneJei); + } + if (interactive) { + if (jobEntryCopy.isTransformation()) { + getActiveJobEntryTransformations().put(jobEntryCopy, (JobEntryTrans) cloneJei); + } + if (jobEntryCopy.isJob()) { + getActiveJobEntryJobs().put(jobEntryCopy, (JobEntryJob) cloneJei); + } + } + log.snap(Metrics.METRIC_JOBENTRY_START, cloneJei.toString()); + newResult = cloneJei.execute(prevResult, nr); + log.snap(Metrics.METRIC_JOBENTRY_STOP, cloneJei.toString()); + + final long end = System.currentTimeMillis(); + if (interactive) { + if (jobEntryCopy.isTransformation()) { + getActiveJobEntryTransformations().remove(jobEntryCopy); + } + if (jobEntryCopy.isJob()) { + getActiveJobEntryJobs().remove(jobEntryCopy); + } + } + + if (cloneJei instanceof JobEntryTrans) { + String throughput = newResult.getReadWriteThroughput((int) ((end - start) / 1000)); + if (throughput != null) { + log.logMinimal(throughput); + } + } + for (JobEntryListener jobEntryListener : jobEntryListeners) { + jobEntryListener.afterExecution(this, jobEntryCopy, cloneJei, newResult); + } + + Thread.currentThread().setContextClassLoader(cl); + addErrors((int) newResult.getNrErrors()); + + // Also capture the logging text after the execution... 
+ // + LoggingBuffer loggingBuffer = KettleLogStore.getAppender(); + StringBuffer logTextBuffer = loggingBuffer.getBuffer(cloneJei.getLogChannel().getLogChannelId(), false); + newResult.setLogText(logTextBuffer.toString() + newResult.getLogText()); + + // Save this result as well... + // + JobEntryResult jerAfter = + new JobEntryResult(newResult, cloneJei.getLogChannel().getLogChannelId(), BaseMessages.getString(PKG, + "Job.Comment.JobFinished"), null, jobEntryCopy.getName(), jobEntryCopy.getNr(), environmentSubstitute( + jobEntryCopy.getEntry().getFilename())); + jobTracker.addJobTracker(new JobTracker(jobMeta, jerAfter)); + synchronized (jobEntryResults) { + jobEntryResults.add(jerAfter); + + // Only keep the last X job entry results in memory + // + if (maxJobEntriesLogged > 0) { + while (jobEntryResults.size() > maxJobEntriesLogged) { + // Remove the oldest. + jobEntryResults.removeFirst(); + } + } + } } - // Pass along the previous result, perhaps the next job can use it... - // However, set the number of errors back to 0 (if it should be reset) - // When an evaluation is executed the errors e.g. should not be reset. - if ( nextEntry.resetErrorsBeforeExecution() ) { - newResult.setNrErrors( 0 ); + extension = new JobExecutionExtension(this, prevResult, jobEntryCopy, extension.executeEntry); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobAfterJobEntryExecution.id, extension); + + // Try all next job entries. + // + // Keep track of all the threads we fired in case of parallel execution... + // Keep track of the results of these executions too. + // + final List threads = new ArrayList(); + // next 2 lists is being modified concurrently so must be synchronized for this case. + final Queue threadResults = new ConcurrentLinkedQueue(); + final Queue threadExceptions = new ConcurrentLinkedQueue(); + final List threadEntries = new ArrayList(); + + // Launch only those where the hop indicates true or false + // + int nrNext = jobMeta.findNrNextJobEntries(jobEntryCopy); + for (int i = 0; i < nrNext && !isStopped(); i++) { + // The next entry is... + final JobEntryCopy nextEntry = jobMeta.findNextJobEntry(jobEntryCopy, i); + + // See if we need to execute this... + final JobHopMeta hi = jobMeta.findJobHop(jobEntryCopy, nextEntry); + + // The next comment... + final String nextComment; + if (hi.isUnconditional()) { + nextComment = BaseMessages.getString(PKG, "Job.Comment.FollowedUnconditional"); + } else { + if (newResult.getResult()) { + nextComment = BaseMessages.getString(PKG, "Job.Comment.FollowedSuccess"); + } else { + nextComment = BaseMessages.getString(PKG, "Job.Comment.FollowedFailure"); + } + } + + // + // If the link is unconditional, execute the next job entry (entries). + // If the start point was an evaluation and the link color is correct: + // green or red, execute the next job entry... + // + if (hi.isUnconditional() || (jobEntryCopy.evaluates() && (!(hi.getEvaluation() ^ newResult + .getResult())))) { + // Start this next step! + if (log.isBasic()) { + log.logBasic(BaseMessages.getString(PKG, "Job.Log.StartingEntry", nextEntry.getName())); + } + + // Pass along the previous result, perhaps the next job can use it... + // However, set the number of errors back to 0 (if it should be reset) + // When an evaluation is executed the errors e.g. should not be reset. + if (nextEntry.resetErrorsBeforeExecution()) { + newResult.setNrErrors(0); + } + + // Now execute! + // + // if (we launch in parallel, fire the execution off in a new thread... 
+ // + if (jobEntryCopy.isLaunchingInParallel()) { + threadEntries.add(nextEntry); + + Runnable runnable = new Runnable() { + public void run() { + try { + Result threadResult = execute(nr + 1, newResult, nextEntry, jobEntryCopy, nextComment); + threadResults.add(threadResult); + } catch (Throwable e) { + log.logError(Const.getStackTracker(e)); + threadExceptions.add(new KettleException(BaseMessages.getString(PKG, "Job.Log.UnexpectedError", + nextEntry.toString()), e)); + Result threadResult = new Result(); + threadResult.setResult(false); + threadResult.setNrErrors(1L); + threadResults.add(threadResult); + } + } + }; + Thread thread = new Thread(runnable); + threads.add(thread); + thread.start(); + if (log.isBasic()) { + log.logBasic(BaseMessages.getString(PKG, "Job.Log.LaunchedJobEntryInParallel", nextEntry.getName())); + } + } else { + try { + // Same as before: blocks until it's done + // + res = execute(nr + 1, newResult, nextEntry, jobEntryCopy, nextComment); + } catch (Throwable e) { + log.logError(Const.getStackTracker(e)); + throw new KettleException(BaseMessages.getString(PKG, "Job.Log.UnexpectedError", nextEntry.toString()), + e); + } + if (log.isBasic()) { + log.logBasic(BaseMessages.getString(PKG, "Job.Log.FinishedJobEntry", nextEntry.getName(), res.getResult() + + "")); + } + } + } } - // Now execute! + // OK, if we run in parallel, we need to wait for all the job entries to + // finish... // - // if (we launch in parallel, fire the execution off in a new thread... + if (jobEntryCopy.isLaunchingInParallel()) { + for (int i = 0; i < threads.size(); i++) { + Thread thread = threads.get(i); + JobEntryCopy nextEntry = threadEntries.get(i); + + try { + thread.join(); + } catch (InterruptedException e) { + log.logError(jobMeta.toString(), BaseMessages.getString(PKG, + "Job.Log.UnexpectedErrorWhileWaitingForJobEntry", nextEntry.getName())); + threadExceptions.add(new KettleException(BaseMessages.getString(PKG, + "Job.Log.UnexpectedErrorWhileWaitingForJobEntry", nextEntry.getName()), e)); + } + } + // if(log.isBasic()) log.logBasic(BaseMessages.getString(PKG, + // "Job.Log.FinishedJobEntry",startpoint.getName(),res.getResult()+"")); + } + + // Perhaps we don't have next steps?? + // In this case, return the previous result. 
+ if (res == null) { + res = prevResult; + } + + // See if there where any errors in the parallel execution // - if ( jobEntryCopy.isLaunchingInParallel() ) { - threadEntries.add( nextEntry ); + if (threadExceptions.size() > 0) { + res.setResult(false); + res.setNrErrors(threadExceptions.size()); - Runnable runnable = new Runnable() { - public void run() { - try { - Result threadResult = execute( nr + 1, newResult, nextEntry, jobEntryCopy, nextComment ); - threadResults.add( threadResult ); - } catch ( Throwable e ) { - log.logError( Const.getStackTracker( e ) ); - threadExceptions.add( new KettleException( BaseMessages.getString( PKG, "Job.Log.UnexpectedError", - nextEntry.toString() ), e ) ); - Result threadResult = new Result(); - threadResult.setResult( false ); - threadResult.setNrErrors( 1L ); - threadResults.add( threadResult ); - } + for (KettleException e : threadExceptions) { + log.logError(jobMeta.toString(), e.getMessage(), e); } - }; - Thread thread = new Thread( runnable ); - threads.add( thread ); - thread.start(); - if ( log.isBasic() ) { - log.logBasic( BaseMessages.getString( PKG, "Job.Log.LaunchedJobEntryInParallel", nextEntry.getName() ) ); - } - } else { - try { - // Same as before: blocks until it's done + + // Now throw the first Exception for good measure... // - res = execute( nr + 1, newResult, nextEntry, jobEntryCopy, nextComment ); - } catch ( Throwable e ) { - log.logError( Const.getStackTracker( e ) ); - throw new KettleException( BaseMessages.getString( PKG, "Job.Log.UnexpectedError", nextEntry.toString() ), - e ); - } - if ( log.isBasic() ) { - log.logBasic( BaseMessages.getString( PKG, "Job.Log.FinishedJobEntry", nextEntry.getName(), res.getResult() - + "" ) ); - } + throw threadExceptions.poll(); } - } - } - // OK, if we run in parallel, we need to wait for all the job entries to - // finish... - // - if ( jobEntryCopy.isLaunchingInParallel() ) { - for ( int i = 0; i < threads.size(); i++ ) { - Thread thread = threads.get( i ); - JobEntryCopy nextEntry = threadEntries.get( i ); + // In parallel execution, we aggregate all the results, simply add them to + // the previous result... + // + for (Result threadResult : threadResults) { + res.add(threadResult); + } - try { - thread.join(); - } catch ( InterruptedException e ) { - log.logError( jobMeta.toString(), BaseMessages.getString( PKG, - "Job.Log.UnexpectedErrorWhileWaitingForJobEntry", nextEntry.getName() ) ); - threadExceptions.add( new KettleException( BaseMessages.getString( PKG, - "Job.Log.UnexpectedErrorWhileWaitingForJobEntry", nextEntry.getName() ), e ) ); + // If there have been errors, logically, we need to set the result to + // "false"... + // + if (res.getNrErrors() > 0) { + res.setResult(false); } - } - // if(log.isBasic()) log.logBasic(BaseMessages.getString(PKG, - // "Job.Log.FinishedJobEntry",startpoint.getName(),res.getResult()+"")); - } - // Perhaps we don't have next steps?? - // In this case, return the previous result. - if ( res == null ) { - res = prevResult; + return res; } - // See if there where any errors in the parallel execution - // - if ( threadExceptions.size() > 0 ) { - res.setResult( false ); - res.setNrErrors( threadExceptions.size() ); + /** + * Wait until this job has finished. + */ + public void waitUntilFinished() { + waitUntilFinished(-1L); + } - for ( KettleException e : threadExceptions ) { - log.logError( jobMeta.toString(), e.getMessage(), e ); - } + /** + * Wait until this job has finished. 
+ * + * @param maxMiliseconds the maximum number of ms to wait + */ + public void waitUntilFinished(long maxMiliseconds) { + long time = 0L; + while (isAlive() && (time < maxMiliseconds || maxMiliseconds <= 0)) { + try { + Thread.sleep(1); + time += 1; + } catch (InterruptedException e) { + // Ignore sleep errors + } + } + } - // Now throw the first Exception for good measure... - // - throw threadExceptions.poll(); + /** + * Get the number of errors that happened in the job. + * + * @return nr of error that have occurred during execution. During execution of a job the number can change. + */ + public int getErrors() { + return errors.get(); } - // In parallel execution, we aggregate all the results, simply add them to - // the previous result... - // - for ( Result threadResult : threadResults ) { - res.add( threadResult ); + /** + * Set the number of occured errors to 0. + */ + public void resetErrors() { + errors.set(0); } - // If there have been errors, logically, we need to set the result to - // "false"... - // - if ( res.getNrErrors() > 0 ) { - res.setResult( false ); + /** + * Add a number of errors to the total number of erros that occured during execution. + * + * @param nrToAdd nr of errors to add. + */ + public void addErrors(int nrToAdd) { + if (nrToAdd > 0) { + errors.addAndGet(nrToAdd); + } } - return res; - } + /** + * Handle logging at start + * + * @return true if it went OK. + * @throws KettleException + */ + public boolean beginProcessing() throws KettleException { + currentDate = new Date(); + logDate = new Date(); + startDate = Const.MIN_DATE; + endDate = currentDate; + + resetErrors(); + + final JobLogTable jobLogTable = jobMeta.getJobLogTable(); + int intervalInSeconds = Const.toInt(environmentSubstitute(jobLogTable.getLogInterval()), -1); + + if (jobLogTable.isDefined()) { + + DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta(); + String schemaName = environmentSubstitute(jobMeta.getJobLogTable().getActualSchemaName()); + String tableName = environmentSubstitute(jobMeta.getJobLogTable().getActualTableName()); + String schemaAndTable = + jobMeta.getJobLogTable().getDatabaseMeta().getQuotedSchemaTableCombination(schemaName, tableName); + Database ldb = new Database(this, logcon); + ldb.shareVariablesWith(this); + ldb.connect(); + ldb.setCommit(logCommitSize); - /** - * Wait until this job has finished. - */ - public void waitUntilFinished() { - waitUntilFinished( -1L ); - } + try { + // See if we have to add a batch id... + Long id_batch = new Long(1); + if (jobMeta.getJobLogTable().isBatchIdUsed()) { + id_batch = logcon.getNextBatchId(ldb, schemaName, tableName, jobLogTable.getKeyField().getFieldName()); + setBatchId(id_batch.longValue()); + if (getPassedBatchId() <= 0) { + setPassedBatchId(id_batch.longValue()); + } + } + + Object[] lastr = ldb.getLastLogDate(schemaAndTable, jobMeta.getName(), true, LogStatus.END); + if (!Const.isEmpty(lastr)) { + Date last; + try { + last = ldb.getReturnRowMeta().getDate(lastr, 0); + } catch (KettleValueException e) { + throw new KettleJobException(BaseMessages.getString(PKG, "Job.Log.ConversionError", "" + tableName), e); + } + if (last != null) { + startDate = last; + } + } + + depDate = currentDate; + + ldb.writeLogRecord(jobMeta.getJobLogTable(), LogStatus.START, this, null); + if (!ldb.isAutoCommit()) { + ldb.commitLog(true, jobMeta.getJobLogTable()); + } + ldb.disconnect(); + + // If we need to do periodic logging, make sure to install a timer for + // this... 
+ // + if (intervalInSeconds > 0) { + final Timer timer = new Timer(getName() + " - interval logging timer"); + TimerTask timerTask = new TimerTask() { + public void run() { + try { + endProcessing(); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Job.Exception.UnableToPerformIntervalLogging"), e); + // Also stop the show... + // + + errors.incrementAndGet(); + stopAll(); + } + } + }; + timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000); + + addJobListener(new JobAdapter() { + public void jobFinished(Job job) { + timer.cancel(); + } + }); + } + + // Add a listener at the end of the job to take of writing the final job + // log record... + // + addJobListener(new JobAdapter() { + public void jobFinished(Job job) throws KettleException { + try { + endProcessing(); + } catch (KettleJobException e) { + log.logError(BaseMessages.getString(PKG, "Job.Exception.UnableToWriteToLoggingTable", jobLogTable + .toString()), e); + // do not skip exception here + // job is failed in case log database record is failed! + throw new KettleException(e); + } + } + }); + + } catch (KettleDatabaseException dbe) { + addErrors(1); // This is even before actual execution + throw new KettleJobException(BaseMessages.getString(PKG, "Job.Log.UnableToProcessLoggingStart", "" + + tableName), dbe); + } finally { + ldb.disconnect(); + } + } - /** - * Wait until this job has finished. - * - * @param maxMiliseconds - * the maximum number of ms to wait - */ - public void waitUntilFinished( long maxMiliseconds ) { - long time = 0L; - while ( isAlive() && ( time < maxMiliseconds || maxMiliseconds <= 0 ) ) { - try { - Thread.sleep( 1 ); - time += 1; - } catch ( InterruptedException e ) { - // Ignore sleep errors - } - } - } - - /** - * Get the number of errors that happened in the job. - * - * @return nr of error that have occurred during execution. During execution of a job the number can change. - */ - public int getErrors() { - return errors.get(); - } + // If we need to write out the job entry logging information, do so at the end of the job: + // + JobEntryLogTable jobEntryLogTable = jobMeta.getJobEntryLogTable(); + if (jobEntryLogTable.isDefined()) { + addJobListener(new JobAdapter() { + public void jobFinished(Job job) throws KettleException { + try { + writeJobEntryLogInformation(); + } catch (KettleException e) { + throw new KettleException(BaseMessages.getString(PKG, + "Job.Exception.UnableToPerformJobEntryLoggingAtJobEnd"), e); + } + } + }); + } - /** - * Set the number of occured errors to 0. - */ - public void resetErrors() { - errors.set( 0 ); - } + // If we need to write the log channel hierarchy and lineage information, + // add a listener for that too... + // + ChannelLogTable channelLogTable = jobMeta.getChannelLogTable(); + if (channelLogTable.isDefined()) { + addJobListener(new JobAdapter() { + + public void jobFinished(Job job) throws KettleException { + try { + writeLogChannelInformation(); + } catch (KettleException e) { + throw new KettleException(BaseMessages.getString(PKG, "Job.Exception.UnableToPerformLoggingAtTransEnd"), + e); + } + } + }); + } - /** - * Add a number of errors to the total number of erros that occured during execution. - * - * @param nrToAdd - * nr of errors to add. 
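The interval-logging block above follows the standard java.util.Timer recipe: schedule a repeating TimerTask that writes a status record, then cancel the timer from a job-finished listener so it cannot outlive the job. A stripped-down, stand-alone sketch of the same recipe (flushStatus() is only a placeholder for endProcessing()):

    import java.util.Timer;
    import java.util.TimerTask;

    public class IntervalLoggingSketch {
        static void flushStatus() {
            // Placeholder for endProcessing(): write a RUNNING record to the log table.
            System.out.println("status flushed at " + System.currentTimeMillis());
        }

        public static void main(String[] args) throws InterruptedException {
            int intervalInSeconds = 1;
            final Timer timer = new Timer("sample-job - interval logging timer");

            timer.schedule(new TimerTask() {
                @Override
                public void run() {
                    flushStatus();                 // periodic status update
                }
            }, intervalInSeconds * 1000L, intervalInSeconds * 1000L);

            Thread.sleep(3500);                    // simulate the job doing its work
            timer.cancel();                        // the jobFinished(...) listener would do this
            System.out.println("job finished, interval logging stopped");
        }
    }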
- */ - public void addErrors( int nrToAdd ) { - if ( nrToAdd > 0 ) { - errors.addAndGet( nrToAdd ); + JobExecutionExtension extension = new JobExecutionExtension(this, result, null, false); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobBeginProcessing.id, extension); + + return true; } - } - /** - * Handle logging at start - * - * @return true if it went OK. - * - * @throws KettleException - */ - public boolean beginProcessing() throws KettleException { - currentDate = new Date(); - logDate = new Date(); - startDate = Const.MIN_DATE; - endDate = currentDate; - - resetErrors(); - - final JobLogTable jobLogTable = jobMeta.getJobLogTable(); - int intervalInSeconds = Const.toInt( environmentSubstitute( jobLogTable.getLogInterval() ), -1 ); - - if ( jobLogTable.isDefined() ) { - - DatabaseMeta logcon = jobMeta.getJobLogTable().getDatabaseMeta(); - String schemaName = environmentSubstitute( jobMeta.getJobLogTable().getActualSchemaName() ); - String tableName = environmentSubstitute( jobMeta.getJobLogTable().getActualTableName() ); - String schemaAndTable = - jobMeta.getJobLogTable().getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName ); - Database ldb = new Database( this, logcon ); - ldb.shareVariablesWith( this ); - ldb.connect(); - ldb.setCommit( logCommitSize ); - - try { - // See if we have to add a batch id... - Long id_batch = new Long( 1 ); - if ( jobMeta.getJobLogTable().isBatchIdUsed() ) { - id_batch = logcon.getNextBatchId( ldb, schemaName, tableName, jobLogTable.getKeyField().getFieldName() ); - setBatchId( id_batch.longValue() ); - if ( getPassedBatchId() <= 0 ) { - setPassedBatchId( id_batch.longValue() ); - } + // + // Handle logging at end + + /** + * End processing. + * + * @return true, if successful + * @throws KettleJobException the kettle job exception + */ + private boolean endProcessing() throws KettleJobException { + LogStatus status; + if (!isActive()) { + if (isStopped()) { + status = LogStatus.STOP; + } else { + status = LogStatus.END; + } + } else { + status = LogStatus.RUNNING; } + try { + if (errors.get() == 0 && result != null && !result.getResult()) { + errors.incrementAndGet(); + } - Object[] lastr = ldb.getLastLogDate( schemaAndTable, jobMeta.getName(), true, LogStatus.END ); - if ( !Const.isEmpty( lastr ) ) { - Date last; - try { - last = ldb.getReturnRowMeta().getDate( lastr, 0 ); - } catch ( KettleValueException e ) { - throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.ConversionError", "" + tableName ), e ); - } - if ( last != null ) { - startDate = last; - } - } + logDate = new Date(); - depDate = currentDate; + /* + * Sums errors, read, written, etc. + */ - ldb.writeLogRecord( jobMeta.getJobLogTable(), LogStatus.START, this, null ); - if ( !ldb.isAutoCommit() ) { - ldb.commitLog( true, jobMeta.getJobLogTable() ); + JobLogTable jobLogTable = jobMeta.getJobLogTable(); + if (jobLogTable.isDefined()) { + + writeLogTableInformation(jobLogTable, status); + } + + return true; + } catch (Exception e) { + throw new KettleJobException(e); // In case something else goes wrong. } - ldb.disconnect(); + } - // If we need to do periodic logging, make sure to install a timer for - // this... 
- // - if ( intervalInSeconds > 0 ) { - final Timer timer = new Timer( getName() + " - interval logging timer" ); - TimerTask timerTask = new TimerTask() { - public void run() { - try { - endProcessing(); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Job.Exception.UnableToPerformIntervalLogging" ), e ); - // Also stop the show... - // + /** + * Writes information to Job Log table. + * Cleans old records, in case job is finished. + */ + protected void writeLogTableInformation(JobLogTable jobLogTable, LogStatus status) + throws KettleJobException, KettleDatabaseException { + boolean cleanLogRecords = status.equals(LogStatus.END); + String tableName = jobLogTable.getActualTableName(); + DatabaseMeta logcon = jobLogTable.getDatabaseMeta(); + + Database ldb = createDataBase(logcon); + ldb.shareVariablesWith(this); + try { + ldb.connect(); + ldb.setCommit(logCommitSize); + ldb.writeLogRecord(jobLogTable, status, this, null); - errors.incrementAndGet(); - stopAll(); - } + if (cleanLogRecords) { + ldb.cleanupLogRecords(jobLogTable); } - }; - timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 ); - addJobListener( new JobAdapter() { - public void jobFinished( Job job ) { - timer.cancel(); + } catch (KettleDatabaseException dbe) { + addErrors(1); + throw new KettleJobException("Unable to end processing by writing log record to table " + tableName, dbe); + } finally { + if (!ldb.isAutoCommit()) { + ldb.commitLog(true, jobLogTable); } - } ); + ldb.disconnect(); } + } - // Add a listener at the end of the job to take of writing the final job - // log record... - // - addJobListener( new JobAdapter() { - public void jobFinished( Job job ) throws KettleException { - try { - endProcessing(); - } catch ( KettleJobException e ) { - log.logError( BaseMessages.getString( PKG, "Job.Exception.UnableToWriteToLoggingTable", jobLogTable - .toString() ), e ); - // do not skip exception here - // job is failed in case log database record is failed! - throw new KettleException( e ); + /** + * Write log channel information. 
+ * + * @throws KettleException the kettle exception + */ + protected void writeLogChannelInformation() throws KettleException { + Database db = null; + ChannelLogTable channelLogTable = jobMeta.getChannelLogTable(); + + // PDI-7070: If parent job has the same channel logging info, don't duplicate log entries + Job j = getParentJob(); + + if (j != null) { + if (channelLogTable.equals(j.getJobMeta().getChannelLogTable())) { + return; } - } - } ); + } + // end PDI-7070 - } catch ( KettleDatabaseException dbe ) { - addErrors( 1 ); // This is even before actual execution - throw new KettleJobException( BaseMessages.getString( PKG, "Job.Log.UnableToProcessLoggingStart", "" - + tableName ), dbe ); - } finally { - ldb.disconnect(); - } - } + try { + db = new Database(this, channelLogTable.getDatabaseMeta()); + db.shareVariablesWith(this); + db.connect(); + db.setCommit(logCommitSize); + + List loggingHierarchyList = getLoggingHierarchy(); + for (LoggingHierarchy loggingHierarchy : loggingHierarchyList) { + db.writeLogRecord(channelLogTable, LogStatus.START, loggingHierarchy, null); + } - // If we need to write out the job entry logging information, do so at the end of the job: - // - JobEntryLogTable jobEntryLogTable = jobMeta.getJobEntryLogTable(); - if ( jobEntryLogTable.isDefined() ) { - addJobListener( new JobAdapter() { - public void jobFinished( Job job ) throws KettleException { - try { - writeJobEntryLogInformation(); - } catch ( KettleException e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Job.Exception.UnableToPerformJobEntryLoggingAtJobEnd" ), e ); - } + // Also time-out the log records in here... + // + db.cleanupLogRecords(channelLogTable); + + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToWriteLogChannelInformationToLogTable"), e); + } finally { + if (!db.isAutoCommit()) { + db.commit(true); + } + db.disconnect(); } - } ); } - // If we need to write the log channel hierarchy and lineage information, - // add a listener for that too... - // - ChannelLogTable channelLogTable = jobMeta.getChannelLogTable(); - if ( channelLogTable.isDefined() ) { - addJobListener( new JobAdapter() { - - public void jobFinished( Job job ) throws KettleException { - try { - writeLogChannelInformation(); - } catch ( KettleException e ) { - throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToPerformLoggingAtTransEnd" ), - e ); - } + /** + * Write job entry log information. 
+ * + * @throws KettleException the kettle exception + */ + protected void writeJobEntryLogInformation() throws KettleException { + Database db = null; + JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable(); + try { + db = createDataBase(jobEntryLogTable.getDatabaseMeta()); + db.shareVariablesWith(this); + db.connect(); + db.setCommit(logCommitSize); + + for (JobEntryCopy copy : getJobMeta().getJobCopies()) { + db.writeLogRecord(jobEntryLogTable, LogStatus.START, copy, this); + } + + db.cleanupLogRecords(jobEntryLogTable); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, "Job.Exception.UnableToJobEntryInformationToLogTable"), + e); + } finally { + if (!db.isAutoCommit()) { + db.commitLog(true, jobEntryLogTable); + } + db.disconnect(); } - } ); } - JobExecutionExtension extension = new JobExecutionExtension( this, result, null, false ); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobBeginProcessing.id, extension ); + protected Database createDataBase(DatabaseMeta databaseMeta) { + return new Database(this, databaseMeta); + } - return true; - } + /** + * Checks if is active. + * + * @return true, if is active + */ + public boolean isActive() { + return active.get(); + } - // - // Handle logging at end - /** - * End processing. - * - * @return true, if successful - * @throws KettleJobException - * the kettle job exception - */ - private boolean endProcessing() throws KettleJobException { - LogStatus status; - if ( !isActive() ) { - if ( isStopped() ) { - status = LogStatus.STOP; - } else { - status = LogStatus.END; - } - } else { - status = LogStatus.RUNNING; - } - try { - if ( errors.get() == 0 && result != null && !result.getResult() ) { - errors.incrementAndGet(); - } - - logDate = new Date(); + /** + * Stop all activity by setting the stopped property to true. + */ + public void stopAll() { + stopped.set(true); + } - /* - * Sums errors, read, written, etc. - */ + /** + * Sets the stopped. + * + * @param stopped the new stopped + */ + public void setStopped(boolean stopped) { + this.stopped.set(stopped); + } - JobLogTable jobLogTable = jobMeta.getJobLogTable(); - if ( jobLogTable.isDefined() ) { + /** + * Gets the stopped status of this Job. + * + * @return Returns the stopped status of this Job + */ + public boolean isStopped() { + return stopped.get(); + } - writeLogTableInformation( jobLogTable, status ); - } + /** + * Gets the start date. + * + * @return Returns the startDate + */ + public Date getStartDate() { + return startDate; + } - return true; - } catch ( Exception e ) { - throw new KettleJobException( e ); // In case something else goes wrong. + /** + * Gets the end date. + * + * @return Returns the endDate + */ + public Date getEndDate() { + return endDate; } - } - /** - * Writes information to Job Log table. - * Cleans old records, in case job is finished. 
- * - */ - protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) - throws KettleJobException, KettleDatabaseException { - boolean cleanLogRecords = status.equals( LogStatus.END ); - String tableName = jobLogTable.getActualTableName(); - DatabaseMeta logcon = jobLogTable.getDatabaseMeta(); - - Database ldb = createDataBase( logcon ); - ldb.shareVariablesWith( this ); - try { - ldb.connect(); - ldb.setCommit( logCommitSize ); - ldb.writeLogRecord( jobLogTable, status, this, null ); - - if ( cleanLogRecords ) { - ldb.cleanupLogRecords( jobLogTable ); - } - - } catch ( KettleDatabaseException dbe ) { - addErrors( 1 ); - throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe ); - } finally { - if ( !ldb.isAutoCommit() ) { - ldb.commitLog( true, jobLogTable ); - } - ldb.disconnect(); - } - } - /** - * Write log channel information. - * - * @throws KettleException - * the kettle exception - */ - protected void writeLogChannelInformation() throws KettleException { - Database db = null; - ChannelLogTable channelLogTable = jobMeta.getChannelLogTable(); - - // PDI-7070: If parent job has the same channel logging info, don't duplicate log entries - Job j = getParentJob(); - - if ( j != null ) { - if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) { - return; - } - } - // end PDI-7070 - - try { - db = new Database( this, channelLogTable.getDatabaseMeta() ); - db.shareVariablesWith( this ); - db.connect(); - db.setCommit( logCommitSize ); - - List loggingHierarchyList = getLoggingHierarchy(); - for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) { - db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null ); - } - - // Also time-out the log records in here... - // - db.cleanupLogRecords( channelLogTable ); - - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e ); - } finally { - if ( !db.isAutoCommit() ) { - db.commit( true ); - } - db.disconnect(); - } - } - - /** - * Write job entry log information. - * - * @throws KettleException - * the kettle exception - */ - protected void writeJobEntryLogInformation() throws KettleException { - Database db = null; - JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable(); - try { - db = createDataBase( jobEntryLogTable.getDatabaseMeta() ); - db.shareVariablesWith( this ); - db.connect(); - db.setCommit( logCommitSize ); - - for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) { - db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this ); - } - - db.cleanupLogRecords( jobEntryLogTable ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToJobEntryInformationToLogTable" ), - e ); - } finally { - if ( !db.isAutoCommit() ) { - db.commitLog( true, jobEntryLogTable ); - } - db.disconnect(); - } - } - - protected Database createDataBase( DatabaseMeta databaseMeta ) { - return new Database( this, databaseMeta ); - } - - /** - * Checks if is active. - * - * @return true, if is active - */ - public boolean isActive() { - return active.get(); - } + /** + * Gets the current date. + * + * @return Returns the currentDate + */ + public Date getCurrentDate() { + return currentDate; + } - /** - * Stop all activity by setting the stopped property to true. - */ - public void stopAll() { - stopped.set( true ); - } + /** + * Gets the dep date. 
+ * + * @return Returns the depDate + */ + public Date getDepDate() { + return depDate; + } - /** - * Sets the stopped. - * - * @param stopped - * the new stopped - */ - public void setStopped( boolean stopped ) { - this.stopped.set( stopped ); - } + /** + * Gets the log date. + * + * @return Returns the logDate + */ + public Date getLogDate() { + return logDate; + } - /** - * Gets the stopped status of this Job. - * - * @return Returns the stopped status of this Job - */ - public boolean isStopped() { - return stopped.get(); - } + /** + * Gets the job meta. + * + * @return Returns the JobMeta + */ + public JobMeta getJobMeta() { + return jobMeta; + } - /** - * Gets the start date. - * - * @return Returns the startDate - */ - public Date getStartDate() { - return startDate; - } + /** + * Gets the rep (repository). + * + * @return Returns the rep + */ + public Repository getRep() { + return rep; + } - /** - * Gets the end date. - * - * @return Returns the endDate - */ - public Date getEndDate() { - return endDate; - } + /** + * Gets the thread. + * + * @return the thread + */ + public Thread getThread() { + return this; + } - /** - * Gets the current date. - * - * @return Returns the currentDate - */ - public Date getCurrentDate() { - return currentDate; - } + /** + * Gets the job tracker. + * + * @return Returns the jobTracker + */ + public JobTracker getJobTracker() { + return jobTracker; + } - /** - * Gets the dep date. - * - * @return Returns the depDate - */ - public Date getDepDate() { - return depDate; - } + /** + * Sets the job tracker. + * + * @param jobTracker The jobTracker to set + */ + public void setJobTracker(JobTracker jobTracker) { + this.jobTracker = jobTracker; + } - /** - * Gets the log date. - * - * @return Returns the logDate - */ - public Date getLogDate() { - return logDate; - } + /** + * Sets the source rows. + * + * @param sourceRows the new source rows + */ + public void setSourceRows(List sourceRows) { + this.sourceRows = sourceRows; + } - /** - * Gets the job meta. - * - * @return Returns the JobMeta - */ - public JobMeta getJobMeta() { - return jobMeta; - } + /** + * Gets the source rows. + * + * @return the source rows + */ + public List getSourceRows() { + return sourceRows; + } - /** - * Gets the rep (repository). - * - * @return Returns the rep - */ - public Repository getRep() { - return rep; - } + /** + * Gets the parent job. + * + * @return Returns the parentJob + */ + public Job getParentJob() { + return parentJob; + } - /** - * Gets the thread. - * - * @return the thread - */ - public Thread getThread() { - return this; - } + /** + * Sets the parent job. + * + * @param parentJob The parentJob to set. + */ + public void setParentJob(Job parentJob) { + this.logLevel = parentJob.getLogLevel(); + this.log.setLogLevel(logLevel); + this.containerObjectId = log.getContainerObjectId(); + this.parentJob = parentJob; + } - /** - * Gets the job tracker. - * - * @return Returns the jobTracker - */ - public JobTracker getJobTracker() { - return jobTracker; - } + /** + * Gets the result. + * + * @return the result + */ + public Result getResult() { + return result; + } - /** - * Sets the job tracker. - * - * @param jobTracker - * The jobTracker to set - */ - public void setJobTracker( JobTracker jobTracker ) { - this.jobTracker = jobTracker; - } + /** + * Sets the result. + * + * @param result the new result + */ + public void setResult(Result result) { + this.result = result; + } - /** - * Sets the source rows. 
- * - * @param sourceRows - * the new source rows - */ - public void setSourceRows( List sourceRows ) { - this.sourceRows = sourceRows; - } + /** + * Gets the boolean value of initialized. + * + * @return Returns the initialized + */ + public boolean isInitialized() { + return initialized.get(); + } - /** - * Gets the source rows. - * - * @return the source rows - */ - public List getSourceRows() { - return sourceRows; - } + /** + * Gets the batchId. + * + * @return Returns the batchId + */ + public long getBatchId() { + return batchId; + } - /** - * Gets the parent job. - * - * @return Returns the parentJob - */ - public Job getParentJob() { - return parentJob; - } + /** + * Sets the batchId. + * + * @param batchId The batchId to set + */ + public void setBatchId(long batchId) { + this.batchId = batchId; + } - /** - * Sets the parent job. - * - * @param parentJob - * The parentJob to set. - */ - public void setParentJob( Job parentJob ) { - this.logLevel = parentJob.getLogLevel(); - this.log.setLogLevel( logLevel ); - this.containerObjectId = log.getContainerObjectId(); - this.parentJob = parentJob; - } - - /** - * Gets the result. - * - * @return the result - */ - public Result getResult() { - return result; - } + /** + * Gets the passedBatchId. + * + * @return the passedBatchId + */ + public long getPassedBatchId() { + return passedBatchId; + } - /** - * Sets the result. - * - * @param result - * the new result - */ - public void setResult( Result result ) { - this.result = result; - } + /** + * Sets the passedBatchId. + * + * @param jobBatchId the jobBatchId to set + */ + public void setPassedBatchId(long jobBatchId) { + this.passedBatchId = jobBatchId; + } - /** - * Gets the boolean value of initialized. - * - * @return Returns the initialized - */ - public boolean isInitialized() { - return initialized.get(); - } + /** + * Sets the internal kettle variables. + * + * @param var the new internal kettle variables. + */ + public void setInternalKettleVariables(VariableSpace var) { + if (jobMeta != null && jobMeta.getFilename() != null) { // we have a finename that's defined. + try { + FileObject fileObject = KettleVFS.getFileObject(jobMeta.getFilename(), this); + FileName fileName = fileObject.getName(); + + // The filename of the transformation + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, fileName.getBaseName()); + + // The directory of the transformation + FileName fileDir = fileName.getParent(); + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, fileDir.getURI()); + } catch (Exception e) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, ""); + } + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, ""); + } - /** - * Gets the batchId. - * - * @return Returns the batchId - */ - public long getBatchId() { - return batchId; - } + boolean hasRepoDir = jobMeta.getRepositoryDirectory() != null && jobMeta.getRepository() != null; - /** - * Sets the batchId. - * - * @param batchId - * The batchId to set - */ - public void setBatchId( long batchId ) { - this.batchId = batchId; - } + // The name of the job + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_NAME, Const.NVL(jobMeta.getName(), "")); - /** - * Gets the passedBatchId. 
- * - * @return the passedBatchId - */ - public long getPassedBatchId() { - return passedBatchId; - } + // The name of the directory in the repository + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, hasRepoDir ? jobMeta + .getRepositoryDirectory().getPath() : ""); - /** - * Sets the passedBatchId. - * - * @param jobBatchId - * the jobBatchId to set - */ - public void setPassedBatchId( long jobBatchId ) { - this.passedBatchId = jobBatchId; - } + // setup fallbacks + if (hasRepoDir) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY)); + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY)); + } - /** - * Sets the internal kettle variables. - * - * @param var - * the new internal kettle variables. - */ - public void setInternalKettleVariables( VariableSpace var ) { - if ( jobMeta != null && jobMeta.getFilename() != null ) { // we have a finename that's defined. - try { - FileObject fileObject = KettleVFS.getFileObject( jobMeta.getFilename(), this ); - FileName fileName = fileObject.getName(); - - // The filename of the transformation - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, fileName.getBaseName() ); - - // The directory of the transformation - FileName fileDir = fileName.getParent(); - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, fileDir.getURI() ); - } catch ( Exception e ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); - } - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); - } - - boolean hasRepoDir = jobMeta.getRepositoryDirectory() != null && jobMeta.getRepository() != null; - - // The name of the job - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, Const.NVL( jobMeta.getName(), "" ) ); - - // The name of the directory in the repository - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, hasRepoDir ? 
jobMeta - .getRepositoryDirectory().getPath() : "" ); - - // setup fallbacks - if ( hasRepoDir ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) ); - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); - } - - if ( hasRepoDir ) { - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) ); - if ( "/".equals( variables.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ) ) { - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "" ); - } - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); - } - } - - /* + if (hasRepoDir) { + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY)); + if ("/".equals(variables.getVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY))) { + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, ""); + } + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY)); + } + } + + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) */ - public void copyVariablesFrom( VariableSpace space ) { - variables.copyVariablesFrom( space ); - } + public void copyVariablesFrom(VariableSpace space) { + variables.copyVariablesFrom(space); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) */ - public String environmentSubstitute( String aString ) { - return variables.environmentSubstitute( aString ); - } + public String environmentSubstitute(String aString) { + return variables.environmentSubstitute(aString); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) */ - public String[] environmentSubstitute( String[] aString ) { - return variables.environmentSubstitute( aString ); - } + public String[] environmentSubstitute(String[] aString) { + return variables.environmentSubstitute(aString); + } - public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) - throws KettleValueException { - return variables.fieldSubstitute( aString, rowMeta, rowData ); - } + public String fieldSubstitute(String aString, RowMetaInterface rowMeta, Object[] rowData) + throws KettleValueException { + return variables.fieldSubstitute(aString, rowMeta, rowData); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() */ - public VariableSpace getParentVariableSpace() { - return variables.getParentVariableSpace(); - } + public VariableSpace getParentVariableSpace() { + return variables.getParentVariableSpace(); + } - /* + /* * (non-Javadoc) * * @see * org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(org.pentaho.di.core.variables.VariableSpace) */ - public void setParentVariableSpace( VariableSpace parent ) { - variables.setParentVariableSpace( parent ); - } + public void setParentVariableSpace(VariableSpace parent) { + 
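The variable juggling above reduces to one decision: if the job was loaded from a repository, the repository path wins and the filename directory borrows it; otherwise the filename directory wins. The current-entry directory then follows the same choice, with a bare "/" normalised to an empty string. A small plain-Java sketch of that decision (the map stands in for the Kettle variable space, and the key strings mirror the Const.INTERNAL_VARIABLE_* names used above):

    import java.util.HashMap;
    import java.util.Map;

    public class CurrentDirectorySketch {
        static final String FILE_DIR = "Internal.Job.Filename.Directory";
        static final String REPO_DIR = "Internal.Job.Repository.Directory";
        static final String CURRENT_DIR = "Internal.Entry.Current.Directory";

        static Map<String, String> resolve(String filenameDir, String repoDir, boolean hasRepoDir) {
            Map<String, String> vars = new HashMap<>();
            vars.put(FILE_DIR, filenameDir == null ? "" : filenameDir);
            vars.put(REPO_DIR, hasRepoDir ? repoDir : "");

            // Whichever side is missing borrows its value from the other one.
            if (hasRepoDir) {
                vars.put(FILE_DIR, vars.get(REPO_DIR));
            } else {
                vars.put(REPO_DIR, vars.get(FILE_DIR));
            }

            // The current-entry directory follows the same preference; "/" collapses to "".
            String current = hasRepoDir ? vars.get(REPO_DIR) : vars.get(FILE_DIR);
            vars.put(CURRENT_DIR, "/".equals(current) ? "" : current);
            return vars;
        }

        public static void main(String[] args) {
            System.out.println(resolve("file:///tmp/jobs", "/", true));   // repository job at the root
            System.out.println(resolve("file:///tmp/jobs", null, false)); // file-based job
        }
    }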
variables.setParentVariableSpace(parent); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) */ - public String getVariable( String variableName, String defaultValue ) { - return variables.getVariable( variableName, defaultValue ); - } + public String getVariable(String variableName, String defaultValue) { + return variables.getVariable(variableName, defaultValue); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) */ - public String getVariable( String variableName ) { - return variables.getVariable( variableName ); - } + public String getVariable(String variableName) { + return variables.getVariable(variableName); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) */ - public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) { - if ( !Const.isEmpty( variableName ) ) { - String value = environmentSubstitute( variableName ); - if ( !Const.isEmpty( value ) ) { - return ValueMeta.convertStringToBoolean( value ); - } + public boolean getBooleanValueOfVariable(String variableName, boolean defaultValue) { + if (!Const.isEmpty(variableName)) { + String value = environmentSubstitute(variableName); + if (!Const.isEmpty(value)) { + return ValueMeta.convertStringToBoolean(value); + } + } + return defaultValue; } - return defaultValue; - } - /* + /* * (non-Javadoc) * * @see * org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(org.pentaho.di.core.variables.VariableSpace) */ - public void initializeVariablesFrom( VariableSpace parent ) { - variables.initializeVariablesFrom( parent ); - } + public void initializeVariablesFrom(VariableSpace parent) { + variables.initializeVariablesFrom(parent); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#listVariables() */ - public String[] listVariables() { - return variables.listVariables(); - } + public String[] listVariables() { + return variables.listVariables(); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) */ - public void setVariable( String variableName, String variableValue ) { - variables.setVariable( variableName, variableValue ); - } + public void setVariable(String variableName, String variableValue) { + variables.setVariable(variableName, variableValue); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) */ - public void shareVariablesWith( VariableSpace space ) { - variables = space; - } + public void shareVariablesWith(VariableSpace space) { + variables = space; + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) */ - public void injectVariables( Map prop ) { - variables.injectVariables( prop ); - } + public void injectVariables(Map prop) { + variables.injectVariables(prop); + } - /** - * Gets the status. - * - * @return the status - */ - public String getStatus() { - String message; - - if ( !initialized.get() ) { - message = Trans.STRING_WAITING; - } else { - if ( active.get() ) { - if ( stopped.get() ) { - message = Trans.STRING_HALTING; - } else { - message = Trans.STRING_RUNNING; - } - } else { - if ( stopped.get() ) { - message = Trans.STRING_STOPPED; + /** + * Gets the status. 
+ * + * @return the status + */ + public String getStatus() { + String message; + + if (!initialized.get()) { + message = Trans.STRING_WAITING; } else { - message = Trans.STRING_FINISHED; - } - if ( result != null && result.getNrErrors() > 0 ) { - message += " (with errors)"; + if (active.get()) { + if (stopped.get()) { + message = Trans.STRING_HALTING; + } else { + message = Trans.STRING_RUNNING; + } + } else { + if (stopped.get()) { + message = Trans.STRING_STOPPED; + } else { + message = Trans.STRING_FINISHED; + } + if (result != null && result.getNrErrors() > 0) { + message += " (with errors)"; + } + } } - } - } - return message; - } + return message; + } - /** - * Send to slave server. - * - * @param jobMeta - * the job meta - * @param executionConfiguration - * the execution configuration - * @param repository - * the repository - * @param metaStore - * the metaStore - * @return the string - * @throws KettleException - * the kettle exception - */ - public static String sendToSlaveServer( JobMeta jobMeta, JobExecutionConfiguration executionConfiguration, - Repository repository, IMetaStore metaStore ) throws KettleException { - String carteObjectId; - SlaveServer slaveServer = executionConfiguration.getRemoteServer(); + /** + * Send to slave server. + * + * @param jobMeta the job meta + * @param executionConfiguration the execution configuration + * @param repository the repository + * @param metaStore the metaStore + * @return the string + * @throws KettleException the kettle exception + */ + public static String sendToSlaveServer(JobMeta jobMeta, JobExecutionConfiguration executionConfiguration, + Repository repository, IMetaStore metaStore) throws KettleException { + String carteObjectId; + SlaveServer slaveServer = executionConfiguration.getRemoteServer(); + + if (slaveServer == null) { + throw new KettleException(BaseMessages.getString(PKG, "Job.Log.NoSlaveServerSpecified")); + } + if (Const.isEmpty(jobMeta.getName())) { + throw new KettleException(BaseMessages.getString(PKG, "Job.Log.UniqueJobName")); + } - if ( slaveServer == null ) { - throw new KettleException( BaseMessages.getString( PKG, "Job.Log.NoSlaveServerSpecified" ) ); - } - if ( Const.isEmpty( jobMeta.getName() ) ) { - throw new KettleException( BaseMessages.getString( PKG, "Job.Log.UniqueJobName" ) ); - } + // Align logging levels between execution configuration and remote server + slaveServer.getLogChannel().setLogLevel(executionConfiguration.getLogLevel()); - // Align logging levels between execution configuration and remote server - slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() ); + try { + // Inject certain internal variables to make it more intuitive. + // + for (String var : Const.INTERNAL_TRANS_VARIABLES) { + executionConfiguration.getVariables().put(var, jobMeta.getVariable(var)); + } + for (String var : Const.INTERNAL_JOB_VARIABLES) { + executionConfiguration.getVariables().put(var, jobMeta.getVariable(var)); + } - try { - // Inject certain internal variables to make it more intuitive. - // - for ( String var : Const.INTERNAL_TRANS_VARIABLES ) { - executionConfiguration.getVariables().put( var, jobMeta.getVariable( var ) ); - } - for ( String var : Const.INTERNAL_JOB_VARIABLES ) { - executionConfiguration.getVariables().put( var, jobMeta.getVariable( var ) ); - } + if (executionConfiguration.isPassingExport()) { + // First export the job... 
slaveServer.getVariable("MASTER_HOST") + // + FileObject tempFile = + KettleVFS.createTempFile("jobExport", ".zip", System.getProperty("java.io.tmpdir"), jobMeta); - if ( executionConfiguration.isPassingExport() ) { - // First export the job... slaveServer.getVariable("MASTER_HOST") - // - FileObject tempFile = - KettleVFS.createTempFile( "jobExport", ".zip", System.getProperty( "java.io.tmpdir" ), jobMeta ); + TopLevelResource topLevelResource = + ResourceUtil.serializeResourceExportInterface(tempFile.getName().toString(), jobMeta, jobMeta, repository, + metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME); - TopLevelResource topLevelResource = - ResourceUtil.serializeResourceExportInterface( tempFile.getName().toString(), jobMeta, jobMeta, repository, - metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME ); + // Send the zip file over to the slave server... + // + String result = + slaveServer.sendExport(topLevelResource.getArchiveName(), AddExportServlet.TYPE_JOB, topLevelResource + .getBaseResourceName()); + WebResult webResult = WebResult.fromXMLString(result); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error passing the exported job to the remote server: " + Const.CR + + webResult.getMessage()); + } + carteObjectId = webResult.getId(); + } else { + String xml = new JobConfiguration(jobMeta, executionConfiguration).getXML(); + + String reply = slaveServer.sendXML(xml, RegisterJobServlet.CONTEXT_PATH + "/?xml=Y"); + WebResult webResult = WebResult.fromXMLString(reply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error posting the job on the remote server: " + Const.CR + webResult + .getMessage()); + } + carteObjectId = webResult.getId(); + } - // Send the zip file over to the slave server... 
- // - String result = - slaveServer.sendExport( topLevelResource.getArchiveName(), AddExportServlet.TYPE_JOB, topLevelResource - .getBaseResourceName() ); - WebResult webResult = WebResult.fromXMLString( result ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error passing the exported job to the remote server: " + Const.CR - + webResult.getMessage() ); + // Start the job + // + String reply = + slaveServer.execService(StartJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(jobMeta.getName(), + "UTF-8") + "&xml=Y&id=" + carteObjectId); + WebResult webResult = WebResult.fromXMLString(reply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error starting the job on the remote server: " + Const.CR + webResult + .getMessage()); + } + return carteObjectId; + } catch (KettleException ke) { + throw ke; + } catch (Exception e) { + throw new KettleException(e); } - carteObjectId = webResult.getId(); - } else { - String xml = new JobConfiguration( jobMeta, executionConfiguration ).getXML(); - - String reply = slaveServer.sendXML( xml, RegisterJobServlet.CONTEXT_PATH + "/?xml=Y" ); - WebResult webResult = WebResult.fromXMLString( reply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error posting the job on the remote server: " + Const.CR + webResult - .getMessage() ); + } + + /** + * Add a job listener to the job + * + * @param jobListener the job listener to add + */ + public void addJobListener(JobListener jobListener) { + synchronized (jobListeners) { + jobListeners.add(jobListener); } - carteObjectId = webResult.getId(); - } - - // Start the job - // - String reply = - slaveServer.execService( StartJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( jobMeta.getName(), - "UTF-8" ) + "&xml=Y&id=" + carteObjectId ); - WebResult webResult = WebResult.fromXMLString( reply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error starting the job on the remote server: " + Const.CR + webResult - .getMessage() ); - } - return carteObjectId; - } catch ( KettleException ke ) { - throw ke; - } catch ( Exception e ) { - throw new KettleException( e ); - } - } - - /** - * Add a job listener to the job - * - * @param jobListener - * the job listener to add - */ - public void addJobListener( JobListener jobListener ) { - synchronized ( jobListeners ) { - jobListeners.add( jobListener ); } - } - /** - * Adds the job entry listener. - * - * @param jobEntryListener - * the job entry listener - */ - public void addJobEntryListener( JobEntryListener jobEntryListener ) { - jobEntryListeners.add( jobEntryListener ); - } + /** + * Adds the job entry listener. 
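sendToSlaveServer(...) above either ships a zipped export of the job (when passing-export is enabled) or posts the job XML to the Carte registration servlet, then calls the start servlet and returns the Carte object id. A hedged caller-side sketch of driving it; the host, port and credentials are placeholders, and the JobExecutionConfiguration setters are assumed to be the stock ones:

    import org.pentaho.di.cluster.SlaveServer;
    import org.pentaho.di.core.KettleEnvironment;
    import org.pentaho.di.job.Job;
    import org.pentaho.di.job.JobExecutionConfiguration;
    import org.pentaho.di.job.JobMeta;

    public class RemoteJobSketch {
        public static void main(String[] args) throws Exception {
            KettleEnvironment.init();
            JobMeta jobMeta = new JobMeta("/path/to/sample.kjb", null);  // placeholder job file

            // Placeholder Carte slave server definition (name, host, port, user, password).
            SlaveServer carte = new SlaveServer("carte1", "carte-host", "8080", "cluster", "cluster");

            JobExecutionConfiguration config = new JobExecutionConfiguration();
            config.setRemoteServer(carte);    // where the job should run
            config.setPassingExport(false);   // true would zip the job and its resources first

            // Registers and starts the job on the slave, returning its Carte object id.
            String carteObjectId = Job.sendToSlaveServer(jobMeta, config, null, null);
            System.out.println("started remotely, carte object id = " + carteObjectId);
        }
    }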
+ * + * @param jobEntryListener the job entry listener + */ + public void addJobEntryListener(JobEntryListener jobEntryListener) { + jobEntryListeners.add(jobEntryListener); + } - /** - * Remove a job listener from the job - * - * @param jobListener - * the job listener to remove - */ - public void removeJobListener( JobListener jobListener ) { - synchronized ( jobListeners ) { - jobListeners.remove( jobListener ); + /** + * Remove a job listener from the job + * + * @param jobListener the job listener to remove + */ + public void removeJobListener(JobListener jobListener) { + synchronized (jobListeners) { + jobListeners.remove(jobListener); + } } - } - /** - * Remove a job entry listener from the job - * - * @param jobListener - * the job entry listener to remove - */ - public void removeJobEntryListener( JobEntryListener jobEntryListener ) { - jobEntryListeners.remove( jobEntryListener ); - } + /** + * Remove a job entry listener from the job + * + * @param jobListener the job entry listener to remove + */ + public void removeJobEntryListener(JobEntryListener jobEntryListener) { + jobEntryListeners.remove(jobEntryListener); + } - /** - * Gets the job entry listeners. - * - * @return the job entry listeners - */ - public List getJobEntryListeners() { - return jobEntryListeners; - } + /** + * Gets the job entry listeners. + * + * @return the job entry listeners + */ + public List getJobEntryListeners() { + return jobEntryListeners; + } - /** - * Gets the job listeners. - * - * @return the job listeners - */ - public List getJobListeners() { - synchronized ( jobListeners ) { - return new ArrayList( jobListeners ); + /** + * Gets the job listeners. + * + * @return the job listeners + */ + public List getJobListeners() { + synchronized (jobListeners) { + return new ArrayList(jobListeners); + } } - } - /** - * Gets the boolean value of finished. - * - * @return the finished - */ - public boolean isFinished() { - return finished.get(); - } + /** + * Gets the boolean value of finished. + * + * @return the finished + */ + public boolean isFinished() { + return finished.get(); + } - /** - * Sets the value of finished. - * - * @param finished - * the finished to set - */ - public void setFinished( boolean finished ) { - this.finished.set( finished ); - } + /** + * Sets the value of finished. 
+ * + * @param finished the finished to set + */ + public void setFinished(boolean finished) { + this.finished.set(finished); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, * java.lang.String) */ - public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException { - namedParams.addParameterDefinition( key, defValue, description ); - } + public void addParameterDefinition(String key, String defValue, String description) throws DuplicateParamException { + namedParams.addParameterDefinition(key, defValue, description); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) */ - public String getParameterDescription( String key ) throws UnknownParamException { - return namedParams.getParameterDescription( key ); - } + public String getParameterDescription(String key) throws UnknownParamException { + return namedParams.getParameterDescription(key); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) */ - public String getParameterDefault( String key ) throws UnknownParamException { - return namedParams.getParameterDefault( key ); - } + public String getParameterDefault(String key) throws UnknownParamException { + return namedParams.getParameterDefault(key); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) */ - public String getParameterValue( String key ) throws UnknownParamException { - return namedParams.getParameterValue( key ); - } + public String getParameterValue(String key) throws UnknownParamException { + return namedParams.getParameterValue(key); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#listParameters() */ - public String[] listParameters() { - return namedParams.listParameters(); - } + public String[] listParameters() { + return namedParams.listParameters(); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) */ - public void setParameterValue( String key, String value ) throws UnknownParamException { - namedParams.setParameterValue( key, value ); - } + public void setParameterValue(String key, String value) throws UnknownParamException { + namedParams.setParameterValue(key, value); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() */ - public void eraseParameters() { - namedParams.eraseParameters(); - } + public void eraseParameters() { + namedParams.eraseParameters(); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() */ - public void clearParameters() { - namedParams.clearParameters(); - } + public void clearParameters() { + namedParams.clearParameters(); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() */ - public void activateParameters() { - String[] keys = listParameters(); - - for ( String key : keys ) { - String value; - try { - value = getParameterValue( key ); - } catch ( UnknownParamException e ) { - value = ""; - } - String defValue; - try { - defValue = getParameterDefault( key ); - } catch ( UnknownParamException e ) { - defValue = ""; - } - - if ( Const.isEmpty( value ) ) { - setVariable( key, Const.NVL( defValue, "" ) ); - } else { - setVariable( 
key, Const.NVL( value, "" ) ); - } - } - } - - /* + public void activateParameters() { + String[] keys = listParameters(); + + for (String key : keys) { + String value; + try { + value = getParameterValue(key); + } catch (UnknownParamException e) { + value = ""; + } + String defValue; + try { + defValue = getParameterDefault(key); + } catch (UnknownParamException e) { + defValue = ""; + } + + if (Const.isEmpty(value)) { + setVariable(key, Const.NVL(defValue, "")); + } else { + setVariable(key, Const.NVL(value, "")); + } + } + } + + /* * (non-Javadoc) * * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) */ - public void copyParametersFrom( NamedParams params ) { - namedParams.copyParametersFrom( params ); - } - - /** - * Sets the socket repository. - * - * @param socketRepository - * the new socket repository - */ - public void setSocketRepository( SocketRepository socketRepository ) { - this.socketRepository = socketRepository; - } + public void copyParametersFrom(NamedParams params) { + namedParams.copyParametersFrom(params); + } - /** - * Gets the socket repository. - * - * @return the socket repository - */ - public SocketRepository getSocketRepository() { - return socketRepository; - } + /** + * Sets the socket repository. + * + * @param socketRepository the new socket repository + */ + public void setSocketRepository(SocketRepository socketRepository) { + this.socketRepository = socketRepository; + } - /** - * Gets the log channel interface. - * - * @return LogChannelInterface - */ - public LogChannelInterface getLogChannel() { - return log; - } + /** + * Gets the socket repository. + * + * @return the socket repository + */ + public SocketRepository getSocketRepository() { + return socketRepository; + } - /** - * Gets the job name. - * - * @return jobName - */ - public String getObjectName() { - return getJobname(); - } + /** + * Gets the log channel interface. + * + * @return LogChannelInterface + */ + public LogChannelInterface getLogChannel() { + return log; + } - /** - * Always returns null for Job. - * - * @return null - */ - public String getObjectCopy() { - return null; - } + /** + * Gets the job name. + * + * @return jobName + */ + public String getObjectName() { + return getJobname(); + } - /** - * Gets the file name. - * - * @return the filename - */ - public String getFilename() { - if ( jobMeta == null ) { - return null; + /** + * Always returns null for Job. + * + * @return null + */ + public String getObjectCopy() { + return null; } - return jobMeta.getFilename(); - } - /** - * Gets the log channel id. - * - * @return the logChannelId - */ - public String getLogChannelId() { - return log.getLogChannelId(); - } + /** + * Gets the file name. + * + * @return the filename + */ + public String getFilename() { + if (jobMeta == null) { + return null; + } + return jobMeta.getFilename(); + } - /** - * Gets the jobMeta's object id. - * - * @return ObjectId - */ - public ObjectId getObjectId() { - if ( jobMeta == null ) { - return null; + /** + * Gets the log channel id. + * + * @return the logChannelId + */ + public String getLogChannelId() { + return log.getLogChannelId(); } - return jobMeta.getObjectId(); - } - /** - * Gets the job meta's object revision. - * - * @return ObjectRevision - */ - public ObjectRevision getObjectRevision() { - if ( jobMeta == null ) { - return null; + /** + * Gets the jobMeta's object id. 
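activateParameters() above simply walks the declared parameters and promotes each one to a variable, preferring the assigned value and falling back to the declared default when the value is empty. A hedged usage sketch against the NamedParams-style methods shown above (the parameter name and values are made up, and `job` is assumed to be an initialised org.pentaho.di.job.Job):

    // Declare a parameter, assign it, then activate before running the job.
    job.addParameterDefinition("INPUT_DIR", "/tmp/in", "Directory to read input files from");
    job.setParameterValue("INPUT_DIR", "/data/incoming");
    job.activateParameters();   // copies values (or defaults, when empty) into the variable space

    // After activation the parameter is visible as an ordinary variable:
    System.out.println(job.getVariable("INPUT_DIR"));   // -> /data/incoming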
+ * + * @return ObjectId + */ + public ObjectId getObjectId() { + if (jobMeta == null) { + return null; + } + return jobMeta.getObjectId(); } - return jobMeta.getObjectRevision(); - } - /** - * Gets LoggingObjectType.JOB, which is always the value for Job. - * - * @return LoggingObjectType LoggingObjectType.JOB - */ - public LoggingObjectType getObjectType() { - return LoggingObjectType.JOB; - } + /** + * Gets the job meta's object revision. + * + * @return ObjectRevision + */ + public ObjectRevision getObjectRevision() { + if (jobMeta == null) { + return null; + } + return jobMeta.getObjectRevision(); + } - /** - * Gets parent logging object. - * - * @return parentLoggingObject - */ - public LoggingObjectInterface getParent() { - return parentLoggingObject; - } + /** + * Gets LoggingObjectType.JOB, which is always the value for Job. + * + * @return LoggingObjectType LoggingObjectType.JOB + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.JOB; + } - /** - * Gets the job meta's repository directory interface. - * - * @return RepositoryDirectoryInterface - */ - public RepositoryDirectoryInterface getRepositoryDirectory() { - if ( jobMeta == null ) { - return null; + /** + * Gets parent logging object. + * + * @return parentLoggingObject + */ + public LoggingObjectInterface getParent() { + return parentLoggingObject; } - return jobMeta.getRepositoryDirectory(); - } - /** - * Gets the logLevel. - * - * @return logLevel - */ - public LogLevel getLogLevel() { - return logLevel; - } + /** + * Gets the job meta's repository directory interface. + * + * @return RepositoryDirectoryInterface + */ + public RepositoryDirectoryInterface getRepositoryDirectory() { + if (jobMeta == null) { + return null; + } + return jobMeta.getRepositoryDirectory(); + } - /** - * Sets the log level. - * - * @param logLevel - * the new log level - */ - public void setLogLevel( LogLevel logLevel ) { - this.logLevel = logLevel; - log.setLogLevel( logLevel ); - } + /** + * Gets the logLevel. + * + * @return logLevel + */ + public LogLevel getLogLevel() { + return logLevel; + } - /** - * Gets the logging hierarchy. - * - * @return the logging hierarchy - */ - public List getLoggingHierarchy() { - List hierarchy = new ArrayList(); - List childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() ); - for ( String childId : childIds ) { - LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId ); - if ( loggingObject != null ) { - hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) ); - } + /** + * Sets the log level. + * + * @param logLevel the new log level + */ + public void setLogLevel(LogLevel logLevel) { + this.logLevel = logLevel; + log.setLogLevel(logLevel); } - return hierarchy; - } + /** + * Gets the logging hierarchy. + * + * @return the logging hierarchy + */ + public List getLoggingHierarchy() { + List hierarchy = new ArrayList(); + List childIds = LoggingRegistry.getInstance().getLogChannelChildren(getLogChannelId()); + for (String childId : childIds) { + LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject(childId); + if (loggingObject != null) { + hierarchy.add(new LoggingHierarchy(getLogChannelId(), batchId, loggingObject)); + } + } - /** - * Gets the boolean value of interactive. - * - * @return the interactive - */ - public boolean isInteractive() { - return interactive; - } + return hierarchy; + } - /** - * Sets the value of interactive. 
- * - * @param interactive - * the interactive to set - */ - public void setInteractive( boolean interactive ) { - this.interactive = interactive; - } + /** + * Gets the boolean value of interactive. + * + * @return the interactive + */ + public boolean isInteractive() { + return interactive; + } - /** - * Gets the activeJobEntryTransformations. - * - * @return the activeJobEntryTransformations - */ - public Map getActiveJobEntryTransformations() { - return activeJobEntryTransformations; - } + /** + * Sets the value of interactive. + * + * @param interactive the interactive to set + */ + public void setInteractive(boolean interactive) { + this.interactive = interactive; + } - /** - * Gets the activeJobEntryJobs. - * - * @return the activeJobEntryJobs - */ - public Map getActiveJobEntryJobs() { - return activeJobEntryJobs; - } + /** + * Gets the activeJobEntryTransformations. + * + * @return the activeJobEntryTransformations + */ + public Map getActiveJobEntryTransformations() { + return activeJobEntryTransformations; + } - /** - * Gets a flat list of results in THIS job, in the order of execution of job entries. - * - * @return A flat list of results in THIS job, in the order of execution of job entries - */ - public List getJobEntryResults() { - synchronized ( jobEntryResults ) { - return new ArrayList( jobEntryResults ); + /** + * Gets the activeJobEntryJobs. + * + * @return the activeJobEntryJobs + */ + public Map getActiveJobEntryJobs() { + return activeJobEntryJobs; } - } - /** - * Gets the carteObjectId. - * - * @return the carteObjectId - */ - public String getContainerObjectId() { - return containerObjectId; - } + /** + * Gets a flat list of results in THIS job, in the order of execution of job entries. + * + * @return A flat list of results in THIS job, in the order of execution of job entries + */ + public List getJobEntryResults() { + synchronized (jobEntryResults) { + return new ArrayList(jobEntryResults); + } + } - /** - * Sets the execution container object id (containerObjectId). - * - * @param containerObjectId - * the execution container object id to set - */ - public void setContainerObjectId( String containerObjectId ) { - this.containerObjectId = containerObjectId; - } + /** + * Gets the carteObjectId. + * + * @return the carteObjectId + */ + public String getContainerObjectId() { + return containerObjectId; + } - /** - * Gets the parent logging object. - * - * @return the parent logging object - */ - public LoggingObjectInterface getParentLoggingObject() { - return parentLoggingObject; - } + /** + * Sets the execution container object id (containerObjectId). + * + * @param containerObjectId the execution container object id to set + */ + public void setContainerObjectId(String containerObjectId) { + this.containerObjectId = containerObjectId; + } - /** - * Gets the registration date. For job, this always returns null - * - * @return null - */ - public Date getRegistrationDate() { - return null; - } + /** + * Gets the parent logging object. + * + * @return the parent logging object + */ + public LoggingObjectInterface getParentLoggingObject() { + return parentLoggingObject; + } - /** - * Gets the start job entry copy. - * - * @return the startJobEntryCopy - */ - public JobEntryCopy getStartJobEntryCopy() { - return startJobEntryCopy; - } + /** + * Gets the registration date. For job, this always returns null + * + * @return null + */ + public Date getRegistrationDate() { + return null; + } - /** - * Sets the start job entry copy. 
- * - * @param startJobEntryCopy - * the startJobEntryCopy to set - */ - public void setStartJobEntryCopy( JobEntryCopy startJobEntryCopy ) { - this.startJobEntryCopy = startJobEntryCopy; - } + /** + * Gets the start job entry copy. + * + * @return the startJobEntryCopy + */ + public JobEntryCopy getStartJobEntryCopy() { + return startJobEntryCopy; + } - /** - * Gets the executing server. - * - * @return the executingServer - */ - public String getExecutingServer() { - return executingServer; - } + /** + * Sets the start job entry copy. + * + * @param startJobEntryCopy the startJobEntryCopy to set + */ + public void setStartJobEntryCopy(JobEntryCopy startJobEntryCopy) { + this.startJobEntryCopy = startJobEntryCopy; + } - /** - * Sets the executing server. - * - * @param executingServer - * the executingServer to set - */ - public void setExecutingServer( String executingServer ) { - this.executingServer = executingServer; - } + /** + * Gets the executing server. + * + * @return the executingServer + */ + public String getExecutingServer() { + return executingServer; + } - /** - * Gets the executing user. - * - * @return the executingUser - */ - public String getExecutingUser() { - return executingUser; - } + /** + * Sets the executing server. + * + * @param executingServer the executingServer to set + */ + public void setExecutingServer(String executingServer) { + this.executingServer = executingServer; + } - /** - * Sets the executing user. - * - * @param executingUser - * the executingUser to set - */ - public void setExecutingUser( String executingUser ) { - this.executingUser = executingUser; - } + /** + * Gets the executing user. + * + * @return the executingUser + */ + public String getExecutingUser() { + return executingUser; + } - @Override - public boolean isGatheringMetrics() { - return log != null && log.isGatheringMetrics(); - } + /** + * Sets the executing user. + * + * @param executingUser the executingUser to set + */ + public void setExecutingUser(String executingUser) { + this.executingUser = executingUser; + } - @Override - public void setGatheringMetrics( boolean gatheringMetrics ) { - if ( log != null ) { - log.setGatheringMetrics( gatheringMetrics ); + @Override + public boolean isGatheringMetrics() { + return log != null && log.isGatheringMetrics(); } - } - @Override - public boolean isForcingSeparateLogging() { - return log != null && log.isForcingSeparateLogging(); - } + @Override + public void setGatheringMetrics(boolean gatheringMetrics) { + if (log != null) { + log.setGatheringMetrics(gatheringMetrics); + } + } - @Override - public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { - if ( log != null ) { - log.setForcingSeparateLogging( forcingSeparateLogging ); + @Override + public boolean isForcingSeparateLogging() { + return log != null && log.isForcingSeparateLogging(); } - } - /** - * Gets the transaction id. - * - * @return the transactionId - */ - public String getTransactionId() { - return transactionId; - } + @Override + public void setForcingSeparateLogging(boolean forcingSeparateLogging) { + if (log != null) { + log.setForcingSeparateLogging(forcingSeparateLogging); + } + } - /** - * Sets the transaction id. - * - * @param transactionId - * the transactionId to set - */ - public void setTransactionId( String transactionId ) { - this.transactionId = transactionId; - } + /** + * Gets the transaction id. 
+ * + * @return the transactionId + */ + public String getTransactionId() { + return transactionId; + } + + /** + * Sets the transaction id. + * + * @param transactionId the transactionId to set + */ + public void setTransactionId(String transactionId) { + this.transactionId = transactionId; + } - public List getDelegationListeners() { - return delegationListeners; - } + public List getDelegationListeners() { + return delegationListeners; + } - public void setDelegationListeners( List delegationListeners ) { - this.delegationListeners = delegationListeners; - } + public void setDelegationListeners(List delegationListeners) { + this.delegationListeners = delegationListeners; + } - public void addDelegationListener( DelegationListener delegationListener ) { - delegationListeners.add( delegationListener ); - } + public void addDelegationListener(DelegationListener delegationListener) { + delegationListeners.add(delegationListener); + } - public String[] getArguments() { - return arguments; - } + public String[] getArguments() { + return arguments; + } - public void setArguments( String[] arguments ) { - this.arguments = arguments; - } + public void setArguments(String[] arguments) { + this.arguments = arguments; + } - public Trans getParentTrans() { - return parentTrans; - } + public Trans getParentTrans() { + return parentTrans; + } - public void setParentTrans( Trans parentTrans ) { - this.parentTrans = parentTrans; - } + public void setParentTrans(Trans parentTrans) { + this.parentTrans = parentTrans; + } - public Map getExtensionDataMap() { - return extensionDataMap; - } + public Map getExtensionDataMap() { + return extensionDataMap; + } - public Result getStartJobEntryResult() { - return startJobEntryResult; - } + public Result getStartJobEntryResult() { + return startJobEntryResult; + } - public void setStartJobEntryResult( Result startJobEntryResult ) { - this.startJobEntryResult = startJobEntryResult; - } + public void setStartJobEntryResult(Result startJobEntryResult) { + this.startJobEntryResult = startJobEntryResult; + } - protected ExecutorService startHeartbeat( final long intervalInSeconds ) { + protected ExecutorService startHeartbeat(final long intervalInSeconds) { - final ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor( new ThreadFactory() { + final ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { - @Override - public Thread newThread( Runnable r ) { - Thread thread = new Thread( r, "Job Heartbeat Thread for: " + getName() ); - thread.setDaemon( true ); - return thread; - } - } ); + @Override + public Thread newThread(Runnable r) { + Thread thread = new Thread(r, "Job Heartbeat Thread for: " + getName()); + thread.setDaemon(true); + return thread; + } + }); - heartbeat.scheduleAtFixedRate( new Runnable() { - public void run() { + heartbeat.scheduleAtFixedRate(new Runnable() { + public void run() { - if ( Job.this.isFinished() ) { - log.logBasic( "Shutting down heartbeat signal for " + jobMeta.getName() ); - shutdownHeartbeat( heartbeat ); - return; - } + if (Job.this.isFinished()) { + log.logBasic("Shutting down heartbeat signal for " + jobMeta.getName()); + shutdownHeartbeat(heartbeat); + return; + } - try { + try { - log.logDebug( "Triggering heartbeat signal for " + jobMeta.getName() + " at every " + intervalInSeconds - + " seconds" ); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.JobHeartbeat.id, Job.this ); + log.logDebug("Triggering heartbeat signal for " + 
jobMeta.getName() + " at every " + intervalInSeconds + + " seconds"); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.JobHeartbeat.id, Job.this); - } catch ( KettleException e ) { - log.logError( e.getMessage(), e ); - } - } - }, intervalInSeconds /* initial delay */, intervalInSeconds /* interval delay */, TimeUnit.SECONDS ); + } catch (KettleException e) { + log.logError(e.getMessage(), e); + } + } + }, intervalInSeconds /* initial delay */, intervalInSeconds /* interval delay */, TimeUnit.SECONDS); - return heartbeat; - } + return heartbeat; + } - protected void shutdownHeartbeat( ExecutorService heartbeat ) { + protected void shutdownHeartbeat(ExecutorService heartbeat) { - if ( heartbeat != null ) { + if (heartbeat != null) { - try { - heartbeat.shutdownNow(); // prevents waiting tasks from starting and attempts to stop currently executing ones + try { + heartbeat.shutdownNow(); // prevents waiting tasks from starting and attempts to stop currently executing ones - } catch ( Throwable t ) { + } catch (Throwable t) { /* do nothing */ - } + } + } } - } - private int getHeartbeatIntervalInSeconds() { + private int getHeartbeatIntervalInSeconds() { - JobMeta meta = this.jobMeta; + JobMeta meta = this.jobMeta; - // 1 - check if there's a user defined value ( job-specific ) heartbeat periodic interval; - // 2 - check if there's a default defined value ( job-specific ) heartbeat periodic interval; - // 3 - use default Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS if none of the above have been set + // 1 - check if there's a user defined value ( job-specific ) heartbeat periodic interval; + // 2 - check if there's a default defined value ( job-specific ) heartbeat periodic interval; + // 3 - use default Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS if none of the above have been set - try { + try { - if ( meta != null ) { + if (meta != null) { - return Const.toInt( meta.getParameterValue( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), Const.toInt( meta - .getParameterDefault( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), - Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS ) ); - } + return Const.toInt(meta.getParameterValue(Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS), Const.toInt(meta + .getParameterDefault(Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS), + Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS)); + } - } catch ( Exception e ) { + } catch (Exception e) { /* do nothing, return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS */ - } + } - return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS; - } + return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java index 5c16d7b..b4d0d89 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java @@ -29,37 +29,17 @@ import org.apache.commons.vfs2.FileSystemException; import org.pentaho.di.base.AbstractMeta; import org.pentaho.di.cluster.SlaveServer; -import org.pentaho.di.core.CheckResultInterface; -import org.pentaho.di.core.Const; -import org.pentaho.di.core.LastUsedFile; -import org.pentaho.di.core.NotePadMeta; -import org.pentaho.di.core.ProgressMonitorListener; -import org.pentaho.di.core.Props; -import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.*; import org.pentaho.di.core.attributes.AttributesUtil; import org.pentaho.di.core.database.Database; import org.pentaho.di.core.database.DatabaseMeta; -import 
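getHeartbeatIntervalInSeconds() above resolves the heartbeat period in three steps: the job-specific parameter value, then that parameter's default, then the compiled-in Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS. A standalone sketch of the same precedence with the Kettle calls replaced by plain helpers so it runs on its own; the 10-second fallback and the sample values are illustrative, not taken from the patch:

    public class HeartbeatInterval {

        // Stand-in for Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS (value is illustrative).
        static final int DEFAULT_INTERVAL_SECS = 10;

        // Same precedence as getHeartbeatIntervalInSeconds(): value, then default, then constant.
        static int resolve(String parameterValue, String parameterDefault) {
            return toInt(parameterValue, toInt(parameterDefault, DEFAULT_INTERVAL_SECS));
        }

        // Stand-in for Const.toInt(String, int): fall back when missing or not a number.
        static int toInt(String s, int fallback) {
            if (s == null || s.trim().isEmpty()) {
                return fallback;
            }
            try {
                return Integer.parseInt(s.trim());
            } catch (NumberFormatException e) {
                return fallback;
            }
        }

        public static void main(String[] args) {
            System.out.println(resolve(null, null));  // 10 - constant wins
            System.out.println(resolve(null, "30"));  // 30 - parameter default wins
            System.out.println(resolve("5", "30"));   // 5  - explicit value wins
        }
    }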
org.pentaho.di.core.exception.IdNotFoundException; -import org.pentaho.di.core.exception.KettleDatabaseException; -import org.pentaho.di.core.exception.KettleException; -import org.pentaho.di.core.exception.KettleFileException; -import org.pentaho.di.core.exception.KettleXMLException; -import org.pentaho.di.core.exception.LookupReferencesException; +import org.pentaho.di.core.exception.*; import org.pentaho.di.core.extension.ExtensionPointHandler; import org.pentaho.di.core.extension.KettleExtensionPoint; import org.pentaho.di.core.gui.OverwritePrompter; import org.pentaho.di.core.gui.Point; -import org.pentaho.di.core.logging.ChannelLogTable; -import org.pentaho.di.core.logging.JobEntryLogTable; -import org.pentaho.di.core.logging.JobLogTable; -import org.pentaho.di.core.logging.LogChannel; -import org.pentaho.di.core.logging.LogStatus; -import org.pentaho.di.core.logging.LogTableInterface; -import org.pentaho.di.core.logging.LogTablePluginInterface; +import org.pentaho.di.core.logging.*; import org.pentaho.di.core.logging.LogTablePluginInterface.TableType; -import org.pentaho.di.core.logging.LogTablePluginType; -import org.pentaho.di.core.logging.LoggingObjectInterface; -import org.pentaho.di.core.logging.LoggingObjectType; import org.pentaho.di.core.parameters.NamedParamsDefault; import org.pentaho.di.core.parameters.UnknownParamException; import org.pentaho.di.core.plugins.PluginInterface; @@ -77,11 +57,7 @@ import org.pentaho.di.job.entries.special.JobEntrySpecial; import org.pentaho.di.job.entry.JobEntryCopy; import org.pentaho.di.job.entry.JobEntryInterface; -import org.pentaho.di.repository.ObjectId; -import org.pentaho.di.repository.Repository; -import org.pentaho.di.repository.RepositoryDirectory; -import org.pentaho.di.repository.RepositoryElementInterface; -import org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.repository.*; import org.pentaho.di.resource.ResourceDefinition; import org.pentaho.di.resource.ResourceExportInterface; import org.pentaho.di.resource.ResourceNamingInterface; @@ -94,13 +70,7 @@ import org.w3c.dom.Node; import java.io.InputStream; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; /** * The definition of a PDI job is represented by a JobMeta object. It is typically loaded from a .kjb file, a PDI @@ -112,2742 +82,2742 @@ * @since 11-08-2003 */ public class JobMeta extends AbstractMeta - implements Cloneable, Comparable, XMLInterface, ResourceExportInterface, RepositoryElementInterface, - LoggingObjectInterface { + implements Cloneable, Comparable, XMLInterface, ResourceExportInterface, RepositoryElementInterface, + LoggingObjectInterface { - private static Class PKG = JobMeta.class; // for i18n purposes, needed by Translator2!! + private static Class PKG = JobMeta.class; // for i18n purposes, needed by Translator2!! - public static final String XML_TAG = "job"; + public static final String XML_TAG = "job"; - protected static final String XML_TAG_SLAVESERVERS = "slaveservers"; + protected static final String XML_TAG_SLAVESERVERS = "slaveservers"; - /** - * A constant specifying the repository element type as a Job. - */ - public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.JOB; + /** + * A constant specifying the repository element type as a Job. 
+ */ + public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.JOB; - protected String jobVersion; + protected String jobVersion; - protected int jobStatus; + protected int jobStatus; - protected List jobcopies; + protected List jobcopies; - protected List jobhops; + protected List jobhops; - protected String[] arguments; + protected String[] arguments; - protected boolean changedEntries, changedHops; + protected boolean changedEntries, changedHops; - protected JobLogTable jobLogTable; + protected JobLogTable jobLogTable; - protected JobEntryLogTable jobEntryLogTable; + protected JobEntryLogTable jobEntryLogTable; - protected List extraLogTables; + protected List extraLogTables; - /** - * Constant = "SPECIAL" - **/ - public static final String STRING_SPECIAL = "SPECIAL"; + /** + * Constant = "SPECIAL" + **/ + public static final String STRING_SPECIAL = "SPECIAL"; - /** - * Constant = "START" - **/ - public static final String STRING_SPECIAL_START = "START"; + /** + * Constant = "START" + **/ + public static final String STRING_SPECIAL_START = "START"; - /** - * Constant = "DUMMY" - **/ - public static final String STRING_SPECIAL_DUMMY = "DUMMY"; + /** + * Constant = "DUMMY" + **/ + public static final String STRING_SPECIAL_DUMMY = "DUMMY"; - /** - * Constant = "OK" - **/ - public static final String STRING_SPECIAL_OK = "OK"; + /** + * Constant = "OK" + **/ + public static final String STRING_SPECIAL_OK = "OK"; - /** - * Constant = "ERROR" - **/ - public static final String STRING_SPECIAL_ERROR = "ERROR"; + /** + * Constant = "ERROR" + **/ + public static final String STRING_SPECIAL_ERROR = "ERROR"; - /** - * The loop cache. - */ - protected Map loopCache; + /** + * The loop cache. + */ + protected Map loopCache; - /** - * List of booleans indicating whether or not to remember the size and position of the different windows... - */ - public boolean[] max = new boolean[1]; + /** + * List of booleans indicating whether or not to remember the size and position of the different windows... + */ + public boolean[] max = new boolean[1]; - protected boolean batchIdPassed; + protected boolean batchIdPassed; - protected static final String XML_TAG_PARAMETERS = "parameters"; + protected static final String XML_TAG_PARAMETERS = "parameters"; - private List missingEntries; + private List missingEntries; - /** - * Instantiates a new job meta. - */ - public JobMeta() { - clear(); - initializeVariablesFrom( null ); - } + /** + * Instantiates a new job meta. + */ + public JobMeta() { + clear(); + initializeVariablesFrom(null); + } - /** - * Clears or reinitializes many of the JobMeta properties. 
- */ - @Override - public void clear() { - jobcopies = new ArrayList(); - jobhops = new ArrayList(); - - jobLogTable = JobLogTable.getDefault( this, this ); - jobEntryLogTable = JobEntryLogTable.getDefault( this, this ); - extraLogTables = new ArrayList(); - - List plugins = PluginRegistry.getInstance().getPlugins( LogTablePluginType.class ); - for ( PluginInterface plugin : plugins ) { - try { - LogTablePluginInterface logTablePluginInterface = (LogTablePluginInterface) PluginRegistry.getInstance() - .loadClass( plugin ); - if ( logTablePluginInterface.getType() == TableType.JOB ) { - logTablePluginInterface.setContext( this, this ); - extraLogTables.add( logTablePluginInterface ); - } - } catch ( Exception e ) { - LogChannel.GENERAL.logError( "Error loading log table plugin with ID " + plugin.getIds()[0], e ); - } - } - - arguments = null; - - super.clear(); - loopCache = new HashMap(); - addDefaults(); - jobStatus = -1; - jobVersion = null; - - // setInternalKettleVariables(); Don't clear the internal variables for - // ad-hoc jobs, it's ruins the previews - // etc. - } - - /** - * Adds the defaults. - */ - public void addDefaults() { + /** + * Clears or reinitializes many of the JobMeta properties. + */ + @Override + public void clear() { + jobcopies = new ArrayList(); + jobhops = new ArrayList(); + + jobLogTable = JobLogTable.getDefault(this, this); + jobEntryLogTable = JobEntryLogTable.getDefault(this, this); + extraLogTables = new ArrayList(); + + List plugins = PluginRegistry.getInstance().getPlugins(LogTablePluginType.class); + for (PluginInterface plugin : plugins) { + try { + LogTablePluginInterface logTablePluginInterface = (LogTablePluginInterface) PluginRegistry.getInstance() + .loadClass(plugin); + if (logTablePluginInterface.getType() == TableType.JOB) { + logTablePluginInterface.setContext(this, this); + extraLogTables.add(logTablePluginInterface); + } + } catch (Exception e) { + LogChannel.GENERAL.logError("Error loading log table plugin with ID " + plugin.getIds()[0], e); + } + } + + arguments = null; + + super.clear(); + loopCache = new HashMap(); + addDefaults(); + jobStatus = -1; + jobVersion = null; + + // setInternalKettleVariables(); Don't clear the internal variables for + // ad-hoc jobs, it's ruins the previews + // etc. + } + + /** + * Adds the defaults. + */ + public void addDefaults() { /* * addStart(); // Add starting point! addDummy(); // Add dummy! addOK(); // errors == 0 evaluation addError(); // * errors != 0 evaluation */ - clearChanged(); - } + clearChanged(); + } - /** - * Creates the start entry. - * - * @return the job entry copy - */ - public static final JobEntryCopy createStartEntry() { - JobEntrySpecial jobEntrySpecial = new JobEntrySpecial( STRING_SPECIAL_START, true, false ); - JobEntryCopy jobEntry = new JobEntryCopy(); - jobEntry.setObjectId( null ); - jobEntry.setEntry( jobEntrySpecial ); - jobEntry.setLocation( 50, 50 ); - jobEntry.setDrawn( false ); - jobEntry.setDescription( BaseMessages.getString( PKG, "JobMeta.StartJobEntry.Description" ) ); - return jobEntry; - - } - - /** - * Creates the dummy entry. 
- * - * @return the job entry copy - */ - public static final JobEntryCopy createDummyEntry() { - JobEntrySpecial jobEntrySpecial = new JobEntrySpecial( STRING_SPECIAL_DUMMY, false, true ); - JobEntryCopy jobEntry = new JobEntryCopy(); - jobEntry.setObjectId( null ); - jobEntry.setEntry( jobEntrySpecial ); - jobEntry.setLocation( 50, 50 ); - jobEntry.setDrawn( false ); - jobEntry.setDescription( BaseMessages.getString( PKG, "JobMeta.DummyJobEntry.Description" ) ); - return jobEntry; - } - - /** - * Gets the start. - * - * @return the start - */ - public JobEntryCopy getStart() { - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy cge = getJobEntry( i ); - if ( cge.isStart() ) { - return cge; - } - } - return null; - } - - /** - * Gets the dummy. - * - * @return the dummy - */ - public JobEntryCopy getDummy() { - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy cge = getJobEntry( i ); - if ( cge.isDummy() ) { - return cge; - } - } - return null; - } - - /** - * Compares two transformation on name, filename - */ - public int compare( JobMeta j1, JobMeta j2 ) { - if ( Const.isEmpty( j1.getName() ) && !Const.isEmpty( j2.getName() ) ) { - return -1; - } - if ( !Const.isEmpty( j1.getName() ) && Const.isEmpty( j2.getName() ) ) { - return 1; - } - if ( Const.isEmpty( j1.getName() ) && Const.isEmpty( j2.getName() ) || j1.getName().equals( j2.getName() ) ) { - if ( Const.isEmpty( j1.getFilename() ) && !Const.isEmpty( j2.getFilename() ) ) { - return -1; - } - if ( !Const.isEmpty( j1.getFilename() ) && Const.isEmpty( j2.getFilename() ) ) { - return 1; - } - if ( Const.isEmpty( j1.getFilename() ) && Const.isEmpty( j2.getFilename() ) ) { - return 0; - } - return j1.getFilename().compareTo( j2.getFilename() ); - } - - // Compare by name : repositories etc. - // - if ( j1.getObjectRevision() != null && j2.getObjectRevision() == null ) { - return 1; - } - if ( j1.getObjectRevision() == null && j2.getObjectRevision() != null ) { - return -1; - } - int cmp; - if ( j1.getObjectRevision() == null && j2.getObjectRevision() == null ) { - cmp = 0; - } else { - cmp = j1.getObjectRevision().getName().compareTo( j2.getObjectRevision().getName() ); - } - if ( cmp == 0 ) { - return j1.getName().compareTo( j2.getName() ); - } else { - return cmp; - } - } - - /** - * Compares this job's meta-data to the specified job's meta-data. This method simply calls compare(this, o) - * - * @param o the o - * @return the int - * @see #compare(JobMeta, JobMeta) - * @see java.lang.Comparable#compareTo(java.lang.Object) - */ - public int compareTo( JobMeta o ) { - return compare( this, o ); - } - - /** - * Checks whether this job's meta-data object is equal to the specified object. If the specified object is not an - * instance of JobMeta, false is returned. Otherwise the method returns whether a call to compare() indicates equality - * (i.e. compare(this, (JobMeta)obj)==0). - * - * @param obj the obj - * @return true, if successful - * @see #compare(JobMeta, JobMeta) - * @see java.lang.Object#equals(java.lang.Object) - */ - public boolean equals( Object obj ) { - if ( !( obj instanceof JobMeta ) ) { - return false; + /** + * Creates the start entry. 
+ * + * @return the job entry copy + */ + public static final JobEntryCopy createStartEntry() { + JobEntrySpecial jobEntrySpecial = new JobEntrySpecial(STRING_SPECIAL_START, true, false); + JobEntryCopy jobEntry = new JobEntryCopy(); + jobEntry.setObjectId(null); + jobEntry.setEntry(jobEntrySpecial); + jobEntry.setLocation(50, 50); + jobEntry.setDrawn(false); + jobEntry.setDescription(BaseMessages.getString(PKG, "JobMeta.StartJobEntry.Description")); + return jobEntry; + + } + + /** + * Creates the dummy entry. + * + * @return the job entry copy + */ + public static final JobEntryCopy createDummyEntry() { + JobEntrySpecial jobEntrySpecial = new JobEntrySpecial(STRING_SPECIAL_DUMMY, false, true); + JobEntryCopy jobEntry = new JobEntryCopy(); + jobEntry.setObjectId(null); + jobEntry.setEntry(jobEntrySpecial); + jobEntry.setLocation(50, 50); + jobEntry.setDrawn(false); + jobEntry.setDescription(BaseMessages.getString(PKG, "JobMeta.DummyJobEntry.Description")); + return jobEntry; + } + + /** + * Gets the start. + * + * @return the start + */ + public JobEntryCopy getStart() { + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy cge = getJobEntry(i); + if (cge.isStart()) { + return cge; + } + } + return null; } - return compare( this, (JobMeta) obj ) == 0; - } + /** + * Gets the dummy. + * + * @return the dummy + */ + public JobEntryCopy getDummy() { + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy cge = getJobEntry(i); + if (cge.isDummy()) { + return cge; + } + } + return null; + } - /** - * Clones the job meta-data object. - * - * @return a clone of the job meta-data object - * @see java.lang.Object#clone() - */ - public Object clone() { - return realClone( true ); - } - - /** - * Perform a real clone of the job meta-data object, including cloning all lists and copying all values. If the - * doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied - * fields will be cleared. - * - * @param doClear Whether to clear all of the clone's data before copying from the source object - * @return a real clone of the calling object - */ - public Object realClone( boolean doClear ) { - try { - JobMeta jobMeta = (JobMeta) super.clone(); - if ( doClear ) { - jobMeta.clear(); - } else { - jobMeta.jobcopies = new ArrayList(); - jobMeta.jobhops = new ArrayList(); - jobMeta.notes = new ArrayList(); - jobMeta.databases = new ArrayList(); - jobMeta.slaveServers = new ArrayList(); - jobMeta.namedParams = new NamedParamsDefault(); - } - - for ( JobEntryCopy entry : jobcopies ) { - jobMeta.jobcopies.add( (JobEntryCopy) entry.clone_deep() ); - } - for ( JobHopMeta entry : jobhops ) { - jobMeta.jobhops.add( (JobHopMeta) entry.clone() ); - } - for ( NotePadMeta entry : notes ) { - jobMeta.notes.add( (NotePadMeta) entry.clone() ); - } - for ( DatabaseMeta entry : databases ) { - jobMeta.databases.add( (DatabaseMeta) entry.clone() ); - } - for ( SlaveServer slave : slaveServers ) { - jobMeta.getSlaveServers().add( (SlaveServer) slave.clone() ); - } - for ( String key : listParameters() ) { - jobMeta.addParameterDefinition( key, getParameterDefault( key ), getParameterDescription( key ) ); - } - return jobMeta; - } catch ( Exception e ) { - return null; - } - } - - /** - * Gets the job log table. 
- * - * @return the job log table - */ - public JobLogTable getJobLogTable() { - return jobLogTable; - } + /** + * Compares two transformation on name, filename + */ + public int compare(JobMeta j1, JobMeta j2) { + if (Const.isEmpty(j1.getName()) && !Const.isEmpty(j2.getName())) { + return -1; + } + if (!Const.isEmpty(j1.getName()) && Const.isEmpty(j2.getName())) { + return 1; + } + if (Const.isEmpty(j1.getName()) && Const.isEmpty(j2.getName()) || j1.getName().equals(j2.getName())) { + if (Const.isEmpty(j1.getFilename()) && !Const.isEmpty(j2.getFilename())) { + return -1; + } + if (!Const.isEmpty(j1.getFilename()) && Const.isEmpty(j2.getFilename())) { + return 1; + } + if (Const.isEmpty(j1.getFilename()) && Const.isEmpty(j2.getFilename())) { + return 0; + } + return j1.getFilename().compareTo(j2.getFilename()); + } - /** - * Sets the job log table. - * - * @param jobLogTable the new job log table - */ - public void setJobLogTable( JobLogTable jobLogTable ) { - this.jobLogTable = jobLogTable; - } + // Compare by name : repositories etc. + // + if (j1.getObjectRevision() != null && j2.getObjectRevision() == null) { + return 1; + } + if (j1.getObjectRevision() == null && j2.getObjectRevision() != null) { + return -1; + } + int cmp; + if (j1.getObjectRevision() == null && j2.getObjectRevision() == null) { + cmp = 0; + } else { + cmp = j1.getObjectRevision().getName().compareTo(j2.getObjectRevision().getName()); + } + if (cmp == 0) { + return j1.getName().compareTo(j2.getName()); + } else { + return cmp; + } + } - /** - * Clears the different changed flags of the job. - */ - @Override - public void clearChanged() { - changedEntries = false; - changedHops = false; + /** + * Compares this job's meta-data to the specified job's meta-data. This method simply calls compare(this, o) + * + * @param o the o + * @return the int + * @see #compare(JobMeta, JobMeta) + * @see java.lang.Comparable#compareTo(java.lang.Object) + */ + public int compareTo(JobMeta o) { + return compare(this, o); + } + + /** + * Checks whether this job's meta-data object is equal to the specified object. If the specified object is not an + * instance of JobMeta, false is returned. Otherwise the method returns whether a call to compare() indicates equality + * (i.e. compare(this, (JobMeta)obj)==0). + * + * @param obj the obj + * @return true, if successful + * @see #compare(JobMeta, JobMeta) + * @see java.lang.Object#equals(java.lang.Object) + */ + public boolean equals(Object obj) { + if (!(obj instanceof JobMeta)) { + return false; + } - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy entry = getJobEntry( i ); - entry.setChanged( false ); + return compare(this, (JobMeta) obj) == 0; } - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops - hi.setChanged( false ); + + /** + * Clones the job meta-data object. + * + * @return a clone of the job meta-data object + * @see java.lang.Object#clone() + */ + public Object clone() { + return realClone(true); } - super.clearChanged(); - } - /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.changed.ChangedFlag#hasChanged() - */ - @Override - public boolean hasChanged() { - if ( super.hasChanged() ) { - return true; + /** + * Perform a real clone of the job meta-data object, including cloning all lists and copying all values. If the + * doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied + * fields will be cleared. 
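compare(JobMeta, JobMeta) above orders jobs by name first, then by filename, and only consults the repository object revision when both match; compareTo() and equals() delegate to it. A short usage sketch, assuming a list of already loaded JobMeta objects (the helper class is hypothetical):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    import org.pentaho.di.job.JobMeta;

    public class SortJobs {

        // Sort loaded jobs with the ordering defined by compare() above:
        // name first, then filename, then repository object revision.
        public static List<JobMeta> sortByNameThenFile(List<JobMeta> jobs) {
            List<JobMeta> sorted = new ArrayList<JobMeta>(jobs);
            Collections.sort(sorted, new Comparator<JobMeta>() {
                public int compare(JobMeta a, JobMeta b) {
                    return a.compareTo(b);
                }
            });
            return sorted;
        }
    }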
+ * + * @param doClear Whether to clear all of the clone's data before copying from the source object + * @return a real clone of the calling object + */ + public Object realClone(boolean doClear) { + try { + JobMeta jobMeta = (JobMeta) super.clone(); + if (doClear) { + jobMeta.clear(); + } else { + jobMeta.jobcopies = new ArrayList(); + jobMeta.jobhops = new ArrayList(); + jobMeta.notes = new ArrayList(); + jobMeta.databases = new ArrayList(); + jobMeta.slaveServers = new ArrayList(); + jobMeta.namedParams = new NamedParamsDefault(); + } + + for (JobEntryCopy entry : jobcopies) { + jobMeta.jobcopies.add((JobEntryCopy) entry.clone_deep()); + } + for (JobHopMeta entry : jobhops) { + jobMeta.jobhops.add((JobHopMeta) entry.clone()); + } + for (NotePadMeta entry : notes) { + jobMeta.notes.add((NotePadMeta) entry.clone()); + } + for (DatabaseMeta entry : databases) { + jobMeta.databases.add((DatabaseMeta) entry.clone()); + } + for (SlaveServer slave : slaveServers) { + jobMeta.getSlaveServers().add((SlaveServer) slave.clone()); + } + for (String key : listParameters()) { + jobMeta.addParameterDefinition(key, getParameterDefault(key), getParameterDescription(key)); + } + return jobMeta; + } catch (Exception e) { + return null; + } + } + + /** + * Gets the job log table. + * + * @return the job log table + */ + public JobLogTable getJobLogTable() { + return jobLogTable; } - if ( haveJobEntriesChanged() ) { - return true; + /** + * Sets the job log table. + * + * @param jobLogTable the new job log table + */ + public void setJobLogTable(JobLogTable jobLogTable) { + this.jobLogTable = jobLogTable; } - if ( haveJobHopsChanged() ) { - return true; + + /** + * Clears the different changed flags of the job. + */ + @Override + public void clearChanged() { + changedEntries = false; + changedHops = false; + + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy entry = getJobEntry(i); + entry.setChanged(false); + } + for (JobHopMeta hi : jobhops) { + // Look at all the hops + hi.setChanged(false); + } + super.clearChanged(); } - return false; - } + /* + * (non-Javadoc) + * + * @see org.pentaho.di.core.changed.ChangedFlag#hasChanged() + */ + @Override + public boolean hasChanged() { + if (super.hasChanged()) { + return true; + } - private Set getUsedDatabaseMetas() { - Set databaseMetas = new HashSet(); - for ( JobEntryCopy jobEntryCopy : getJobCopies() ) { - DatabaseMeta[] dbs = jobEntryCopy.getEntry().getUsedDatabaseConnections(); - if ( dbs != null ) { - for ( DatabaseMeta db : dbs ) { - databaseMetas.add( db ); + if (haveJobEntriesChanged()) { + return true; + } + if (haveJobHopsChanged()) { + return true; } - } + + return false; } - databaseMetas.add( jobLogTable.getDatabaseMeta() ); + private Set getUsedDatabaseMetas() { + Set databaseMetas = new HashSet(); + for (JobEntryCopy jobEntryCopy : getJobCopies()) { + DatabaseMeta[] dbs = jobEntryCopy.getEntry().getUsedDatabaseConnections(); + if (dbs != null) { + for (DatabaseMeta db : dbs) { + databaseMetas.add(db); + } + } + } + + databaseMetas.add(jobLogTable.getDatabaseMeta()); - for ( LogTableInterface logTable : getExtraLogTables() ) { - databaseMetas.add( logTable.getDatabaseMeta() ); + for (LogTableInterface logTable : getExtraLogTables()) { + databaseMetas.add(logTable.getDatabaseMeta()); + } + return databaseMetas; } - return databaseMetas; - } - /** - * This method asks all steps in the transformation whether or not the specified database connection is used. 
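realClone(doClear) above is the deep copy behind clone(): clone() calls realClone(true), while realClone(false) skips the full clear() and only resets the lists that are about to be re-populated with cloned entries, hops, notes, databases, slave servers and parameters. A brief sketch of the difference from calling code, assuming an existing JobMeta named original (the surrounding class and names are made up):

    import org.pentaho.di.job.JobMeta;

    public class CloneJobMeta {

        public static void copies(JobMeta original) {
            // Full deep copy; equivalent to original.realClone(true).
            JobMeta viaClone = (JobMeta) original.clone();

            // Deep copy without the clear() pass: only the copied fields are reset first.
            JobMeta viaRealClone = (JobMeta) original.realClone(false);

            // Each copy owns its own entry, hop, note and database lists,
            // so renaming a copy leaves the original untouched.
            viaClone.setName(original.getName() + " (copy)");
            viaRealClone.setName(original.getName() + " (copy 2)");
        }
    }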
The - * connection is used in the transformation if any of the steps uses it or if it is being used to log to. - * - * @param databaseMeta The connection to check - * @return true if the connection is used in this transformation. - */ - public boolean isDatabaseConnectionUsed( DatabaseMeta databaseMeta ) { - return getUsedDatabaseMetas().contains( databaseMeta ); - } + /** + * This method asks all steps in the transformation whether or not the specified database connection is used. The + * connection is used in the transformation if any of the steps uses it or if it is being used to log to. + * + * @param databaseMeta The connection to check + * @return true if the connection is used in this transformation. + */ + public boolean isDatabaseConnectionUsed(DatabaseMeta databaseMeta) { + return getUsedDatabaseMetas().contains(databaseMeta); + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.EngineMetaInterface#getFileType() */ - public String getFileType() { - return LastUsedFile.FILE_TYPE_JOB; - } + public String getFileType() { + return LastUsedFile.FILE_TYPE_JOB; + } - /** - * Gets the job filter names. - * - * @return the filter names - * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames() - */ - public String[] getFilterNames() { - return Const.getJobFilterNames(); - } + /** + * Gets the job filter names. + * + * @return the filter names + * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames() + */ + public String[] getFilterNames() { + return Const.getJobFilterNames(); + } - /** - * Gets the job filter extensions. For JobMeta, this method returns the value of {@link Const.STRING_JOB_FILTER_EXT} - * - * @return the filter extensions - * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions() - */ - public String[] getFilterExtensions() { - return Const.STRING_JOB_FILTER_EXT; - } + /** + * Gets the job filter extensions. For JobMeta, this method returns the value of {@link Const.STRING_JOB_FILTER_EXT} + * + * @return the filter extensions + * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions() + */ + public String[] getFilterExtensions() { + return Const.STRING_JOB_FILTER_EXT; + } - /** - * Gets the default extension for a job. For JobMeta, this method returns the value of - * {@link Const#STRING_JOB_DEFAULT_EXT} - * - * @return the default extension - * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension() - */ - public String getDefaultExtension() { - return Const.STRING_JOB_DEFAULT_EXT; - } + /** + * Gets the default extension for a job. 
For JobMeta, this method returns the value of + * {@link Const#STRING_JOB_DEFAULT_EXT} + * + * @return the default extension + * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension() + */ + public String getDefaultExtension() { + return Const.STRING_JOB_DEFAULT_EXT; + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.xml.XMLInterface#getXML() */ - public String getXML() { - Props props = null; - if ( Props.isInitialized() ) { - props = Props.getInstance(); - } + public String getXML() { + Props props = null; + if (Props.isInitialized()) { + props = Props.getInstance(); + } + + StringBuffer retval = new StringBuffer(500); + + retval.append("<").append(XML_TAG).append(">").append(Const.CR); + + retval.append(" ").append(XMLHandler.addTagValue("name", getName())); + + retval.append(" ").append(XMLHandler.addTagValue("description", description)); + retval.append(" ").append(XMLHandler.addTagValue("extended_description", extendedDescription)); + retval.append(" ").append(XMLHandler.addTagValue("job_version", jobVersion)); + if (jobStatus >= 0) { + retval.append(" ").append(XMLHandler.addTagValue("job_status", jobStatus)); + } + + retval.append(" ").append(XMLHandler.addTagValue("directory", + (directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR))); + retval.append(" ").append(XMLHandler.addTagValue("created_user", createdUser)); + retval.append(" ").append(XMLHandler.addTagValue("created_date", XMLHandler.date2string(createdDate))); + retval.append(" ").append(XMLHandler.addTagValue("modified_user", modifiedUser)); + retval.append(" ").append(XMLHandler.addTagValue("modified_date", XMLHandler.date2string(modifiedDate))); + + retval.append(" ").append(XMLHandler.openTag(XML_TAG_PARAMETERS)).append(Const.CR); + String[] parameters = listParameters(); + for (int idx = 0; idx < parameters.length; idx++) { + retval.append(" ").append(XMLHandler.openTag("parameter")).append(Const.CR); + retval.append(" ").append(XMLHandler.addTagValue("name", parameters[idx])); + try { + retval.append(" ") + .append(XMLHandler.addTagValue("default_value", getParameterDefault(parameters[idx]))); + retval.append(" ") + .append(XMLHandler.addTagValue("description", getParameterDescription(parameters[idx]))); + } catch (UnknownParamException e) { + // skip the default value and/or description. This exception should never happen because we use listParameters() + // above. + } + retval.append(" ").append(XMLHandler.closeTag("parameter")).append(Const.CR); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_PARAMETERS)).append(Const.CR); + + Set usedDatabaseMetas = getUsedDatabaseMetas(); + // Save the database connections... + for (int i = 0; i < nrDatabases(); i++) { + DatabaseMeta dbMeta = getDatabase(i); + if (props != null && props.areOnlyUsedConnectionsSavedToXML()) { + if (usedDatabaseMetas.contains(dbMeta)) { + retval.append(dbMeta.getXML()); + } + } else { + retval.append(dbMeta.getXML()); + } + } + + // The slave servers... + // + retval.append(" ").append(XMLHandler.openTag(XML_TAG_SLAVESERVERS)).append(Const.CR); + for (int i = 0; i < slaveServers.size(); i++) { + SlaveServer slaveServer = slaveServers.get(i); + retval.append(" ").append(slaveServer.getXML()).append(Const.CR); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_SLAVESERVERS)).append(Const.CR); - StringBuffer retval = new StringBuffer( 500 ); + // Append the job logging information... 
+ // + for (LogTableInterface logTable : getLogTables()) { + retval.append(logTable.getXML()); + } + + retval.append(" ").append(XMLHandler.addTagValue("pass_batchid", batchIdPassed)); + retval.append(" ").append(XMLHandler.addTagValue("shared_objects_file", sharedObjectsFile)); + + retval.append(" ").append(Const.CR); + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy jge = getJobEntry(i); + jge.getEntry().setRepository(repository); + retval.append(jge.getXML()); + } + retval.append(" ").append(Const.CR); + + retval.append(" ").append(Const.CR); + for (JobHopMeta hi : jobhops) { + // Look at all the hops + retval.append(hi.getXML()); + } + retval.append(" ").append(Const.CR); + + retval.append(" ").append(Const.CR); + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + retval.append(ni.getXML()); + } + retval.append(" ").append(Const.CR); - retval.append( "<" ).append( XML_TAG ).append( ">" ).append( Const.CR ); + // Also store the attribute groups + // + retval.append(AttributesUtil.getAttributesXml(attributesMap)).append(Const.CR); - retval.append( " " ).append( XMLHandler.addTagValue( "name", getName() ) ); + retval.append("").append(Const.CR); - retval.append( " " ).append( XMLHandler.addTagValue( "description", description ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "extended_description", extendedDescription ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "job_version", jobVersion ) ); - if ( jobStatus >= 0 ) { - retval.append( " " ).append( XMLHandler.addTagValue( "job_status", jobStatus ) ); + return retval.toString(); } - retval.append( " " ).append( XMLHandler.addTagValue( "directory", - ( directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR ) ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "created_user", createdUser ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "created_date", XMLHandler.date2string( createdDate ) ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "modified_user", modifiedUser ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "modified_date", XMLHandler.date2string( modifiedDate ) ) ); + /** + * Instantiates a new job meta. + * + * @param fname the fname + * @param rep the rep + * @throws KettleXMLException the kettle xml exception + */ + public JobMeta(String fname, Repository rep) throws KettleXMLException { + this(null, fname, rep, null); + } - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); - String[] parameters = listParameters(); - for ( int idx = 0; idx < parameters.length; idx++ ) { - retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ).append( Const.CR ); - retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[idx] ) ); - try { - retval.append( " " ) - .append( XMLHandler.addTagValue( "default_value", getParameterDefault( parameters[idx] ) ) ); - retval.append( " " ) - .append( XMLHandler.addTagValue( "description", getParameterDescription( parameters[idx] ) ) ); - } catch ( UnknownParamException e ) { - // skip the default value and/or description. This exception should never happen because we use listParameters() - // above. - } - retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ).append( Const.CR ); + /** + * Instantiates a new job meta. 
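getXML() above emits the complete <job> document that a .kjb file holds: name, parameters, used connections, slave servers, log tables, entries, hops, notes and attribute groups. A minimal sketch of writing that output to disk, assuming a loaded JobMeta; the file path, class name and XML header line are illustrative, only getXML() comes from the code above:

    import java.io.FileOutputStream;
    import java.io.OutputStreamWriter;
    import java.io.Writer;

    import org.pentaho.di.job.JobMeta;

    public class SaveJobAsKjb {

        // Write the <job> document produced by getXML() to a .kjb file.
        public static void save(JobMeta jobMeta, String path) throws Exception {
            Writer writer = new OutputStreamWriter(new FileOutputStream(path), "UTF-8");
            try {
                writer.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
                writer.write(jobMeta.getXML());
            } finally {
                writer.close();
            }
        }
    }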
+ * + * @param fname the fname + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public JobMeta(String fname, Repository rep, OverwritePrompter prompter) throws KettleXMLException { + this(null, fname, rep, prompter); } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); - Set usedDatabaseMetas = getUsedDatabaseMetas(); - // Save the database connections... - for ( int i = 0; i < nrDatabases(); i++ ) { - DatabaseMeta dbMeta = getDatabase( i ); - if ( props != null && props.areOnlyUsedConnectionsSavedToXML() ) { - if ( usedDatabaseMetas.contains( dbMeta ) ) { - retval.append( dbMeta.getXML() ); + /** + * Load the job from the XML file specified. + * + * @param fname The filename to load as a job + * @param rep The repository to bind againt, null if there is no repository available. + * @throws KettleXMLException + */ + @Deprecated + public JobMeta(VariableSpace parentSpace, String fname, Repository rep, OverwritePrompter prompter) + throws KettleXMLException { + this(parentSpace, fname, rep, null, prompter); + } + + /** + * Load the job from the XML file specified. + * + * @param fname The filename to load as a job + * @param rep The repository to bind againt, null if there is no repository available. + * @throws KettleXMLException + */ + public JobMeta(VariableSpace parentSpace, String fname, Repository rep, IMetaStore metaStore, + OverwritePrompter prompter) throws KettleXMLException { + this.initializeVariablesFrom(parentSpace); + this.metaStore = metaStore; + try { + // OK, try to load using the VFS stuff... + Document doc = XMLHandler.loadXMLFile(KettleVFS.getFileObject(fname, this)); + if (doc != null) { + // The jobnode + Node jobnode = XMLHandler.getSubNode(doc, XML_TAG); + + loadXML(jobnode, fname, rep, metaStore, false, prompter); + } else { + throw new KettleXMLException( + BaseMessages.getString(PKG, "JobMeta.Exception.ErrorReadingFromXMLFile") + fname); + } + } catch (Exception e) { + throw new KettleXMLException( + BaseMessages.getString(PKG, "JobMeta.Exception.UnableToLoadJobFromXMLFile") + fname + "]", e); } - } else { - retval.append( dbMeta.getXML() ); - } } - // The slave servers... - // - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); - for ( int i = 0; i < slaveServers.size(); i++ ) { - SlaveServer slaveServer = slaveServers.get( i ); - retval.append( " " ).append( slaveServer.getXML() ).append( Const.CR ); + /** + * Instantiates a new job meta. + * + * @param inputStream the input stream + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public JobMeta(InputStream inputStream, Repository rep, OverwritePrompter prompter) throws KettleXMLException { + this(); + Document doc = XMLHandler.loadXMLFile(inputStream, null, false, false); + loadXML(XMLHandler.getSubNode(doc, JobMeta.XML_TAG), rep, prompter); + } + + /** + * Create a new JobMeta object by loading it from a a DOM node. + * + * @param jobnode The node to load from + * @param rep The reference to a repository to load additional information from + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public JobMeta(Node jobnode, Repository rep, OverwritePrompter prompter) throws KettleXMLException { + this(); + loadXML(jobnode, rep, false, prompter); + } + + /** + * Create a new JobMeta object by loading it from a a DOM node. 
+ * + * @param jobnode The node to load from + * @param rep The reference to a repository to load additional information from + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public JobMeta(Node jobnode, Repository rep, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter) + throws KettleXMLException { + this(); + loadXML(jobnode, rep, ignoreRepositorySharedObjects, prompter); } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); - // Append the job logging information... - // - for ( LogTableInterface logTable : getLogTables() ) { - retval.append( logTable.getXML() ); + /** + * Checks if is rep reference. + * + * @return true, if is rep reference + */ + public boolean isRepReference() { + return isRepReference(getFilename(), this.getName()); } - retval.append( " " ).append( XMLHandler.addTagValue( "pass_batchid", batchIdPassed ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "shared_objects_file", sharedObjectsFile ) ); - - retval.append( " " ).append( Const.CR ); - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy jge = getJobEntry( i ); - jge.getEntry().setRepository( repository ); - retval.append( jge.getXML() ); + /** + * Checks if is file reference. + * + * @return true, if is file reference + */ + public boolean isFileReference() { + return !isRepReference(getFilename(), this.getName()); } - retval.append( " " ).append( Const.CR ); - retval.append( " " ).append( Const.CR ); - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops - retval.append( hi.getXML() ); + /** + * Checks if is rep reference. + * + * @param fileName the file name + * @param transName the trans name + * @return true, if is rep reference + */ + public static boolean isRepReference(String fileName, String transName) { + return Const.isEmpty(fileName) && !Const.isEmpty(transName); } - retval.append( " " ).append( Const.CR ); - retval.append( " " ).append( Const.CR ); - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - retval.append( ni.getXML() ); + /** + * Checks if is file reference. + * + * @param fileName the file name + * @param transName the trans name + * @return true, if is file reference + */ + public static boolean isFileReference(String fileName, String transName) { + return !isRepReference(fileName, transName); } - retval.append( " " ).append( Const.CR ); - // Also store the attribute groups - // - retval.append( AttributesUtil.getAttributesXml( attributesMap ) ).append( Const.CR ); + /** + * Load xml. + * + * @param jobnode the jobnode + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public void loadXML(Node jobnode, Repository rep, OverwritePrompter prompter) throws KettleXMLException { + loadXML(jobnode, rep, false, prompter); + } + + /** + * Load xml. + * + * @param jobnode the jobnode + * @param fname The filename + * @param rep the rep + * @param prompter the prompter + * @throws KettleXMLException the kettle xml exception + */ + public void loadXML(Node jobnode, String fname, Repository rep, OverwritePrompter prompter) + throws KettleXMLException { + loadXML(jobnode, fname, rep, false, prompter); + } + + /** + * Load a block of XML from an DOM node. 
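The two static checks above encode a simple rule: a job counts as a repository reference when it has a name but no filename, and as a file reference otherwise. A few illustrative calls, with made-up names, showing how the flags fall out:

    import org.pentaho.di.job.JobMeta;

    public class ReferenceKind {
        public static void main(String[] args) {
            // Name but no filename -> lives in the repository.
            System.out.println(JobMeta.isRepReference(null, "daily_load"));           // true
            // Filename present -> treated as a file reference.
            System.out.println(JobMeta.isFileReference("/jobs/daily_load.kjb", null)); // true
            // Neither set -> not a repository reference, so it counts as a file reference.
            System.out.println(JobMeta.isFileReference(null, null));                   // true
        }
    }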
+ * + * @param jobnode The node to load from + * @param rep The reference to a repository to load additional information from + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public void loadXML(Node jobnode, Repository rep, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter) + throws KettleXMLException { + loadXML(jobnode, null, rep, ignoreRepositorySharedObjects, prompter); + } + + /** + * Load a block of XML from an DOM node. + * + * @param jobnode The node to load from + * @param fname The filename + * @param rep The reference to a repository to load additional information from + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + * @deprecated + */ + @Deprecated + public void loadXML(Node jobnode, String fname, Repository rep, boolean ignoreRepositorySharedObjects, + OverwritePrompter prompter) throws KettleXMLException { + loadXML(jobnode, fname, rep, null, ignoreRepositorySharedObjects, prompter); + } + + /** + * Load a block of XML from an DOM node. + * + * @param jobnode The node to load from + * @param fname The filename + * @param rep The reference to a repository to load additional information from + * @param metaStore the MetaStore to use + * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately + * @param prompter The prompter to use in case a shared object gets overwritten + * @throws KettleXMLException + */ + public void loadXML(Node jobnode, String fname, Repository rep, IMetaStore metaStore, + boolean ignoreRepositorySharedObjects, OverwritePrompter prompter) throws KettleXMLException { + Props props = null; + if (Props.isInitialized()) { + props = Props.getInstance(); + } - retval.append( "" ).append( Const.CR ); + try { + // clear the jobs; + clear(); - return retval.toString(); - } + // If we are not using a repository, we are getting the job from a file + // Set the filename here so it can be used in variables for ALL aspects of the job FIX: PDI-8890 + if (null == rep) { + setFilename(fname); + } - /** - * Instantiates a new job meta. - * - * @param fname the fname - * @param rep the rep - * @throws KettleXMLException the kettle xml exception - */ - public JobMeta( String fname, Repository rep ) throws KettleXMLException { - this( null, fname, rep, null ); - } + // + // get job info: + // + setName(XMLHandler.getTagValue(jobnode, "name")); - /** - * Instantiates a new job meta. - * - * @param fname the fname - * @param rep the rep - * @param prompter the prompter - * @throws KettleXMLException the kettle xml exception - */ - public JobMeta( String fname, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { - this( null, fname, rep, prompter ); - } + // Optionally load the repository directory... + // + if (rep != null) { + String directoryPath = XMLHandler.getTagValue(jobnode, "directory"); + if (directoryPath != null) { + directory = rep.findDirectory(directoryPath); + if (directory == null) { // not found + directory = new RepositoryDirectory(); // The root as default + } + } + } - /** - * Load the job from the XML file specified. - * - * @param fname The filename to load as a job - * @param rep The repository to bind againt, null if there is no repository available. 
- * @throws KettleXMLException - */ - @Deprecated - public JobMeta( VariableSpace parentSpace, String fname, Repository rep, OverwritePrompter prompter ) - throws KettleXMLException { - this( parentSpace, fname, rep, null, prompter ); - } - - /** - * Load the job from the XML file specified. - * - * @param fname The filename to load as a job - * @param rep The repository to bind againt, null if there is no repository available. - * @throws KettleXMLException - */ - public JobMeta( VariableSpace parentSpace, String fname, Repository rep, IMetaStore metaStore, - OverwritePrompter prompter ) throws KettleXMLException { - this.initializeVariablesFrom( parentSpace ); - this.metaStore = metaStore; - try { - // OK, try to load using the VFS stuff... - Document doc = XMLHandler.loadXMLFile( KettleVFS.getFileObject( fname, this ) ); - if ( doc != null ) { - // The jobnode - Node jobnode = XMLHandler.getSubNode( doc, XML_TAG ); - - loadXML( jobnode, fname, rep, metaStore, false, prompter ); - } else { - throw new KettleXMLException( - BaseMessages.getString( PKG, "JobMeta.Exception.ErrorReadingFromXMLFile" ) + fname ); - } - } catch ( Exception e ) { - throw new KettleXMLException( - BaseMessages.getString( PKG, "JobMeta.Exception.UnableToLoadJobFromXMLFile" ) + fname + "]", e ); - } - } - - /** - * Instantiates a new job meta. - * - * @param inputStream the input stream - * @param rep the rep - * @param prompter the prompter - * @throws KettleXMLException the kettle xml exception - */ - public JobMeta( InputStream inputStream, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { - this(); - Document doc = XMLHandler.loadXMLFile( inputStream, null, false, false ); - loadXML( XMLHandler.getSubNode( doc, JobMeta.XML_TAG ), rep, prompter ); - } - - /** - * Create a new JobMeta object by loading it from a a DOM node. - * - * @param jobnode The node to load from - * @param rep The reference to a repository to load additional information from - * @param prompter The prompter to use in case a shared object gets overwritten - * @throws KettleXMLException - */ - public JobMeta( Node jobnode, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { - this(); - loadXML( jobnode, rep, false, prompter ); - } + // description + description = XMLHandler.getTagValue(jobnode, "description"); - /** - * Create a new JobMeta object by loading it from a a DOM node. - * - * @param jobnode The node to load from - * @param rep The reference to a repository to load additional information from - * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately - * @param prompter The prompter to use in case a shared object gets overwritten - * @throws KettleXMLException - */ - public JobMeta( Node jobnode, Repository rep, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter ) - throws KettleXMLException { - this(); - loadXML( jobnode, rep, ignoreRepositorySharedObjects, prompter ); - } - - /** - * Checks if is rep reference. - * - * @return true, if is rep reference - */ - public boolean isRepReference() { - return isRepReference( getFilename(), this.getName() ); - } + // extended description + extendedDescription = XMLHandler.getTagValue(jobnode, "extended_description"); - /** - * Checks if is file reference. 
- * - * @return true, if is file reference - */ - public boolean isFileReference() { - return !isRepReference( getFilename(), this.getName() ); - } + // job version + jobVersion = XMLHandler.getTagValue(jobnode, "job_version"); - /** - * Checks if is rep reference. - * - * @param fileName the file name - * @param transName the trans name - * @return true, if is rep reference - */ - public static boolean isRepReference( String fileName, String transName ) { - return Const.isEmpty( fileName ) && !Const.isEmpty( transName ); - } + // job status + jobStatus = Const.toInt(XMLHandler.getTagValue(jobnode, "job_status"), -1); - /** - * Checks if is file reference. - * - * @param fileName the file name - * @param transName the trans name - * @return true, if is file reference - */ - public static boolean isFileReference( String fileName, String transName ) { - return !isRepReference( fileName, transName ); - } + // Created user/date + createdUser = XMLHandler.getTagValue(jobnode, "created_user"); + String createDate = XMLHandler.getTagValue(jobnode, "created_date"); - /** - * Load xml. - * - * @param jobnode the jobnode - * @param rep the rep - * @param prompter the prompter - * @throws KettleXMLException the kettle xml exception - */ - public void loadXML( Node jobnode, Repository rep, OverwritePrompter prompter ) throws KettleXMLException { - loadXML( jobnode, rep, false, prompter ); - } + if (createDate != null) { + createdDate = XMLHandler.stringToDate(createDate); + } - /** - * Load xml. - * - * @param jobnode the jobnode - * @param fname The filename - * @param rep the rep - * @param prompter the prompter - * @throws KettleXMLException the kettle xml exception - */ - public void loadXML( Node jobnode, String fname, Repository rep, OverwritePrompter prompter ) - throws KettleXMLException { - loadXML( jobnode, fname, rep, false, prompter ); - } + // Changed user/date + modifiedUser = XMLHandler.getTagValue(jobnode, "modified_user"); + String modDate = XMLHandler.getTagValue(jobnode, "modified_date"); + if (modDate != null) { + modifiedDate = XMLHandler.stringToDate(modDate); + } - /** - * Load a block of XML from an DOM node. - * - * @param jobnode The node to load from - * @param rep The reference to a repository to load additional information from - * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately - * @param prompter The prompter to use in case a shared object gets overwritten - * @throws KettleXMLException - */ - public void loadXML( Node jobnode, Repository rep, boolean ignoreRepositorySharedObjects, OverwritePrompter prompter ) - throws KettleXMLException { - loadXML( jobnode, null, rep, ignoreRepositorySharedObjects, prompter ); - } + // Load the default list of databases + // Read objects from the shared XML file & the repository + try { + sharedObjectsFile = XMLHandler.getTagValue(jobnode, "shared_objects_file"); + if (rep == null || ignoreRepositorySharedObjects) { + sharedObjects = readSharedObjects(); + } else { + sharedObjects = rep.readJobMetaSharedObjects(this); + } + } catch (Exception e) { + LogChannel.GENERAL + .logError(BaseMessages.getString(PKG, "JobMeta.ErrorReadingSharedObjects.Message", e.toString())); + LogChannel.GENERAL.logError(Const.getStackTracker(e)); + } - /** - * Load a block of XML from an DOM node. 
- * - * @param jobnode The node to load from - * @param fname The filename - * @param rep The reference to a repository to load additional information from - * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately - * @param prompter The prompter to use in case a shared object gets overwritten - * @throws KettleXMLException - * @deprecated - */ - @Deprecated - public void loadXML( Node jobnode, String fname, Repository rep, boolean ignoreRepositorySharedObjects, - OverwritePrompter prompter ) throws KettleXMLException { - loadXML( jobnode, fname, rep, null, ignoreRepositorySharedObjects, prompter ); - } - - /** - * Load a block of XML from an DOM node. - * - * @param jobnode The node to load from - * @param fname The filename - * @param rep The reference to a repository to load additional information from - * @param metaStore the MetaStore to use - * @param ignoreRepositorySharedObjects Do not load shared objects, handled separately - * @param prompter The prompter to use in case a shared object gets overwritten - * @throws KettleXMLException - */ - public void loadXML( Node jobnode, String fname, Repository rep, IMetaStore metaStore, - boolean ignoreRepositorySharedObjects, OverwritePrompter prompter ) throws KettleXMLException { - Props props = null; - if ( Props.isInitialized() ) { - props = Props.getInstance(); - } - - try { - // clear the jobs; - clear(); - - // If we are not using a repository, we are getting the job from a file - // Set the filename here so it can be used in variables for ALL aspects of the job FIX: PDI-8890 - if ( null == rep ) { - setFilename( fname ); - } - - // - // get job info: - // - setName( XMLHandler.getTagValue( jobnode, "name" ) ); - - // Optionally load the repository directory... - // - if ( rep != null ) { - String directoryPath = XMLHandler.getTagValue( jobnode, "directory" ); - if ( directoryPath != null ) { - directory = rep.findDirectory( directoryPath ); - if ( directory == null ) { // not found - directory = new RepositoryDirectory(); // The root as default - } - } - } - - // description - description = XMLHandler.getTagValue( jobnode, "description" ); - - // extended description - extendedDescription = XMLHandler.getTagValue( jobnode, "extended_description" ); - - // job version - jobVersion = XMLHandler.getTagValue( jobnode, "job_version" ); - - // job status - jobStatus = Const.toInt( XMLHandler.getTagValue( jobnode, "job_status" ), -1 ); - - // Created user/date - createdUser = XMLHandler.getTagValue( jobnode, "created_user" ); - String createDate = XMLHandler.getTagValue( jobnode, "created_date" ); - - if ( createDate != null ) { - createdDate = XMLHandler.stringToDate( createDate ); - } - - // Changed user/date - modifiedUser = XMLHandler.getTagValue( jobnode, "modified_user" ); - String modDate = XMLHandler.getTagValue( jobnode, "modified_date" ); - if ( modDate != null ) { - modifiedDate = XMLHandler.stringToDate( modDate ); - } - - // Load the default list of databases - // Read objects from the shared XML file & the repository - try { - sharedObjectsFile = XMLHandler.getTagValue( jobnode, "shared_objects_file" ); - if ( rep == null || ignoreRepositorySharedObjects ) { - sharedObjects = readSharedObjects(); - } else { - sharedObjects = rep.readJobMetaSharedObjects( this ); - } - } catch ( Exception e ) { - LogChannel.GENERAL - .logError( BaseMessages.getString( PKG, "JobMeta.ErrorReadingSharedObjects.Message", e.toString() ) ); - LogChannel.GENERAL.logError( Const.getStackTracker( e ) ); - } - - // Load the 
database connections, slave servers, cluster schemas & partition schemas into this object. - // - importFromMetaStore(); - - // Read the named parameters. - Node paramsNode = XMLHandler.getSubNode( jobnode, XML_TAG_PARAMETERS ); - int nrParams = XMLHandler.countNodes( paramsNode, "parameter" ); - - for ( int i = 0; i < nrParams; i++ ) { - Node paramNode = XMLHandler.getSubNodeByNr( paramsNode, "parameter", i ); - - String paramName = XMLHandler.getTagValue( paramNode, "name" ); - String defValue = XMLHandler.getTagValue( paramNode, "default_value" ); - String descr = XMLHandler.getTagValue( paramNode, "description" ); - - addParameterDefinition( paramName, defValue, descr ); - } - - // - // Read the database connections - // - int nr = XMLHandler.countNodes( jobnode, "connection" ); - Set privateDatabases = new HashSet( nr ); - for ( int i = 0; i < nr; i++ ) { - Node dbnode = XMLHandler.getSubNodeByNr( jobnode, "connection", i ); - DatabaseMeta dbcon = new DatabaseMeta( dbnode ); - dbcon.shareVariablesWith( this ); - if ( !dbcon.isShared() ) { - privateDatabases.add( dbcon.getName() ); - } - - DatabaseMeta exist = findDatabase( dbcon.getName() ); - if ( exist == null ) { - addDatabase( dbcon ); - } else { - if ( !exist.isShared() ) { - // skip shared connections - if ( shouldOverwrite( prompter, props, - BaseMessages.getString( PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.Message", dbcon.getName() ), - BaseMessages.getString( PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage" ) ) ) { - int idx = indexOfDatabase( exist ); - removeDatabase( idx ); - addDatabase( idx, dbcon ); - } - } - } - } - setPrivateDatabases( privateDatabases ); - - // Read the slave servers... - // - Node slaveServersNode = XMLHandler.getSubNode( jobnode, XML_TAG_SLAVESERVERS ); - int nrSlaveServers = XMLHandler.countNodes( slaveServersNode, SlaveServer.XML_TAG ); - for ( int i = 0; i < nrSlaveServers; i++ ) { - Node slaveServerNode = XMLHandler.getSubNodeByNr( slaveServersNode, SlaveServer.XML_TAG, i ); - SlaveServer slaveServer = new SlaveServer( slaveServerNode ); - slaveServer.shareVariablesWith( this ); - - // Check if the object exists and if it's a shared object. - // If so, then we will keep the shared version, not this one. - // The stored XML is only for backup purposes. - SlaveServer check = findSlaveServer( slaveServer.getName() ); - if ( check != null ) { - if ( !check.isShared() ) { - // we don't overwrite shared objects. - if ( shouldOverwrite( prompter, props, BaseMessages - .getString( PKG, "JobMeta.Dialog.SlaveServerExistsOverWrite.Message", slaveServer.getName() ), - BaseMessages.getString( PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage" ) ) ) { - addOrReplaceSlaveServer( slaveServer ); - } - } - } else { - slaveServers.add( slaveServer ); - } - } + // Load the database connections, slave servers, cluster schemas & partition schemas into this object. + // + importFromMetaStore(); + + // Read the named parameters. 
+ Node paramsNode = XMLHandler.getSubNode(jobnode, XML_TAG_PARAMETERS); + int nrParams = XMLHandler.countNodes(paramsNode, "parameter"); + + for (int i = 0; i < nrParams; i++) { + Node paramNode = XMLHandler.getSubNodeByNr(paramsNode, "parameter", i); + + String paramName = XMLHandler.getTagValue(paramNode, "name"); + String defValue = XMLHandler.getTagValue(paramNode, "default_value"); + String descr = XMLHandler.getTagValue(paramNode, "description"); + + addParameterDefinition(paramName, defValue, descr); + } + + // + // Read the database connections + // + int nr = XMLHandler.countNodes(jobnode, "connection"); + Set privateDatabases = new HashSet(nr); + for (int i = 0; i < nr; i++) { + Node dbnode = XMLHandler.getSubNodeByNr(jobnode, "connection", i); + DatabaseMeta dbcon = new DatabaseMeta(dbnode); + dbcon.shareVariablesWith(this); + if (!dbcon.isShared()) { + privateDatabases.add(dbcon.getName()); + } + + DatabaseMeta exist = findDatabase(dbcon.getName()); + if (exist == null) { + addDatabase(dbcon); + } else { + if (!exist.isShared()) { + // skip shared connections + if (shouldOverwrite(prompter, props, + BaseMessages.getString(PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.Message", dbcon.getName()), + BaseMessages.getString(PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"))) { + int idx = indexOfDatabase(exist); + removeDatabase(idx); + addDatabase(idx, dbcon); + } + } + } + } + setPrivateDatabases(privateDatabases); + + // Read the slave servers... + // + Node slaveServersNode = XMLHandler.getSubNode(jobnode, XML_TAG_SLAVESERVERS); + int nrSlaveServers = XMLHandler.countNodes(slaveServersNode, SlaveServer.XML_TAG); + for (int i = 0; i < nrSlaveServers; i++) { + Node slaveServerNode = XMLHandler.getSubNodeByNr(slaveServersNode, SlaveServer.XML_TAG, i); + SlaveServer slaveServer = new SlaveServer(slaveServerNode); + slaveServer.shareVariablesWith(this); + + // Check if the object exists and if it's a shared object. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + SlaveServer check = findSlaveServer(slaveServer.getName()); + if (check != null) { + if (!check.isShared()) { + // we don't overwrite shared objects. + if (shouldOverwrite(prompter, props, BaseMessages + .getString(PKG, "JobMeta.Dialog.SlaveServerExistsOverWrite.Message", slaveServer.getName()), + BaseMessages.getString(PKG, "JobMeta.Dialog.ConnectionExistsOverWrite.DontShowAnyMoreMessage"))) { + addOrReplaceSlaveServer(slaveServer); + } + } + } else { + slaveServers.add(slaveServer); + } + } /* * Get the log database connection & log table */ - // Backward compatibility... 
- // - Node jobLogNode = XMLHandler.getSubNode( jobnode, JobLogTable.XML_TAG ); - if ( jobLogNode == null ) { - // Load the XML - // - jobLogTable.setConnectionName( XMLHandler.getTagValue( jobnode, "logconnection" ) ); - jobLogTable.setTableName( XMLHandler.getTagValue( jobnode, "logtable" ) ); - jobLogTable.setBatchIdUsed( "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "use_batchid" ) ) ); - jobLogTable.setLogFieldUsed( "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "use_logfield" ) ) ); - jobLogTable.findField( JobLogTable.ID.CHANNEL_ID ).setEnabled( false ); - jobLogTable.findField( JobLogTable.ID.LINES_REJECTED ).setEnabled( false ); - } else { - jobLogTable.loadXML( jobLogNode, databases, null ); - } - - Node channelLogTableNode = XMLHandler.getSubNode( jobnode, ChannelLogTable.XML_TAG ); - if ( channelLogTableNode != null ) { - channelLogTable.loadXML( channelLogTableNode, databases, null ); - } - jobEntryLogTable.loadXML( jobnode, databases, null ); - - for ( LogTableInterface extraLogTable : extraLogTables ) { - extraLogTable.loadXML( jobnode, databases, null ); - } - - batchIdPassed = "Y".equalsIgnoreCase( XMLHandler.getTagValue( jobnode, "pass_batchid" ) ); + // Backward compatibility... + // + Node jobLogNode = XMLHandler.getSubNode(jobnode, JobLogTable.XML_TAG); + if (jobLogNode == null) { + // Load the XML + // + jobLogTable.setConnectionName(XMLHandler.getTagValue(jobnode, "logconnection")); + jobLogTable.setTableName(XMLHandler.getTagValue(jobnode, "logtable")); + jobLogTable.setBatchIdUsed("Y".equalsIgnoreCase(XMLHandler.getTagValue(jobnode, "use_batchid"))); + jobLogTable.setLogFieldUsed("Y".equalsIgnoreCase(XMLHandler.getTagValue(jobnode, "use_logfield"))); + jobLogTable.findField(JobLogTable.ID.CHANNEL_ID).setEnabled(false); + jobLogTable.findField(JobLogTable.ID.LINES_REJECTED).setEnabled(false); + } else { + jobLogTable.loadXML(jobLogNode, databases, null); + } + + Node channelLogTableNode = XMLHandler.getSubNode(jobnode, ChannelLogTable.XML_TAG); + if (channelLogTableNode != null) { + channelLogTable.loadXML(channelLogTableNode, databases, null); + } + jobEntryLogTable.loadXML(jobnode, databases, null); + + for (LogTableInterface extraLogTable : extraLogTables) { + extraLogTable.loadXML(jobnode, databases, null); + } + + batchIdPassed = "Y".equalsIgnoreCase(XMLHandler.getTagValue(jobnode, "pass_batchid")); /* * read the job entries... */ - Node entriesnode = XMLHandler.getSubNode( jobnode, "entries" ); - int tr = XMLHandler.countNodes( entriesnode, "entry" ); - for ( int i = 0; i < tr; i++ ) { - Node entrynode = XMLHandler.getSubNodeByNr( entriesnode, "entry", i ); - // System.out.println("Reading entry:\n"+entrynode); + Node entriesnode = XMLHandler.getSubNode(jobnode, "entries"); + int tr = XMLHandler.countNodes(entriesnode, "entry"); + for (int i = 0; i < tr; i++) { + Node entrynode = XMLHandler.getSubNodeByNr(entriesnode, "entry", i); + // System.out.println("Reading entry:\n"+entrynode); + + JobEntryCopy je = new JobEntryCopy(entrynode, databases, slaveServers, rep, metaStore); + + if (je.isSpecial() && je.isMissing()) { + addMissingEntry((MissingEntry) je.getEntry()); + } + JobEntryCopy prev = findJobEntry(je.getName(), 0, true); + if (prev != null) { + // See if the #0 (root entry) already exists! + // + if (je.getNr() == 0) { + + // Replace previous version with this one: remove it first + // + int idx = indexOfJobEntry(prev); + removeJobEntry(idx); + + } else if (je.getNr() > 0) { + + // Use previously defined JobEntry info! 
+ // + je.setEntry(prev.getEntry()); + + // See if entry already exists... + prev = findJobEntry(je.getName(), je.getNr(), true); + if (prev != null) { + // remove the old one! + // + int idx = indexOfJobEntry(prev); + removeJobEntry(idx); + } + } + } + // Add the JobEntryCopy... + addJobEntry(je); + } - JobEntryCopy je = new JobEntryCopy( entrynode, databases, slaveServers, rep, metaStore ); + Node hopsnode = XMLHandler.getSubNode(jobnode, "hops"); + int ho = XMLHandler.countNodes(hopsnode, "hop"); + for (int i = 0; i < ho; i++) { + Node hopnode = XMLHandler.getSubNodeByNr(hopsnode, "hop", i); + JobHopMeta hi = new JobHopMeta(hopnode, this); + jobhops.add(hi); + } - if ( je.isSpecial() && je.isMissing() ) { - addMissingEntry( (MissingEntry) je.getEntry() ); - } - JobEntryCopy prev = findJobEntry( je.getName(), 0, true ); - if ( prev != null ) { - // See if the #0 (root entry) already exists! - // - if ( je.getNr() == 0 ) { + // Read the notes... + Node notepadsnode = XMLHandler.getSubNode(jobnode, "notepads"); + int nrnotes = XMLHandler.countNodes(notepadsnode, "notepad"); + for (int i = 0; i < nrnotes; i++) { + Node notepadnode = XMLHandler.getSubNodeByNr(notepadsnode, "notepad", i); + NotePadMeta ni = new NotePadMeta(notepadnode); + notes.add(ni); + } - // Replace previous version with this one: remove it first + // Load the attribute groups map // - int idx = indexOfJobEntry( prev ); - removeJobEntry( idx ); - - } else if ( je.getNr() > 0 ) { + attributesMap = AttributesUtil.loadAttributes(XMLHandler.getSubNode(jobnode, AttributesUtil.XML_TAG)); - // Use previously defined JobEntry info! - // - je.setEntry( prev.getEntry() ); - - // See if entry already exists... - prev = findJobEntry( je.getName(), je.getNr(), true ); - if ( prev != null ) { - // remove the old one! - // - int idx = indexOfJobEntry( prev ); - removeJobEntry( idx ); - } - } - } - // Add the JobEntryCopy... - addJobEntry( je ); - } - - Node hopsnode = XMLHandler.getSubNode( jobnode, "hops" ); - int ho = XMLHandler.countNodes( hopsnode, "hop" ); - for ( int i = 0; i < ho; i++ ) { - Node hopnode = XMLHandler.getSubNodeByNr( hopsnode, "hop", i ); - JobHopMeta hi = new JobHopMeta( hopnode, this ); - jobhops.add( hi ); - } - - // Read the notes... - Node notepadsnode = XMLHandler.getSubNode( jobnode, "notepads" ); - int nrnotes = XMLHandler.countNodes( notepadsnode, "notepad" ); - for ( int i = 0; i < nrnotes; i++ ) { - Node notepadnode = XMLHandler.getSubNodeByNr( notepadsnode, "notepad", i ); - NotePadMeta ni = new NotePadMeta( notepadnode ); - notes.add( ni ); - } - - // Load the attribute groups map - // - attributesMap = AttributesUtil.loadAttributes( XMLHandler.getSubNode( jobnode, AttributesUtil.XML_TAG ) ); - - ExtensionPointHandler.callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.JobMetaLoaded.id, this ); - - clearChanged(); - } catch ( Exception e ) { - throw new KettleXMLException( BaseMessages.getString( PKG, "JobMeta.Exception.UnableToLoadJobFromXMLNode" ), e ); - } finally { - setInternalKettleVariables(); - } - } - - /** - * Read shared objects. - * - * @return the shared objects - * @throws KettleException the kettle exception - */ - public SharedObjects readSharedObjects() throws KettleException { - // Extract the shared steps, connections, etc. using the SharedObjects - // class - // - String soFile = environmentSubstitute( sharedObjectsFile ); - SharedObjects sharedObjects = new SharedObjects( soFile ); - Map objectsMap = sharedObjects.getObjectsMap(); - - // First read the databases... 
- // We read databases & slaves first because there might be dependencies - // that need to be resolved. - // - for ( SharedObjectInterface object : objectsMap.values() ) { - if ( object instanceof DatabaseMeta ) { - DatabaseMeta databaseMeta = (DatabaseMeta) object; - databaseMeta.shareVariablesWith( this ); - addOrReplaceDatabase( databaseMeta ); - } else if ( object instanceof SlaveServer ) { - SlaveServer slaveServer = (SlaveServer) object; - slaveServer.shareVariablesWith( this ); - addOrReplaceSlaveServer( slaveServer ); - } - } - - return sharedObjects; - } - - /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() - */ - public void saveSharedObjects() throws KettleException { - try { - // First load all the shared objects... - String soFile = environmentSubstitute( sharedObjectsFile ); - SharedObjects sharedObjects = new SharedObjects( soFile ); - - // Now overwrite the objects in there - List shared = new ArrayList(); - shared.addAll( databases ); - shared.addAll( slaveServers ); - - // The databases connections... - for ( int i = 0; i < shared.size(); i++ ) { - SharedObjectInterface sharedObject = (SharedObjectInterface) shared.get( i ); - if ( sharedObject.isShared() ) { - sharedObjects.storeObject( sharedObject ); - } - } - - // Save the objects - sharedObjects.saveToFile(); - } catch ( Exception e ) { - throw new KettleException( "Unable to save shared ojects", e ); - } - } - - /** - * Gets the job entry copy. - * - * @param x the x - * @param y the y - * @param iconsize the iconsize - * @return the job entry copy - */ - public JobEntryCopy getJobEntryCopy( int x, int y, int iconsize ) { - int i, s; - s = nrJobEntries(); - for ( i = s - 1; i >= 0; i-- ) { - // Back to front because drawing goes from start to end + ExtensionPointHandler.callExtensionPoint(LogChannel.GENERAL, KettleExtensionPoint.JobMetaLoaded.id, this); - JobEntryCopy je = getJobEntry( i ); - Point p = je.getLocation(); - if ( p != null ) { - if ( x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize ) { - return je; + clearChanged(); + } catch (Exception e) { + throw new KettleXMLException(BaseMessages.getString(PKG, "JobMeta.Exception.UnableToLoadJobFromXMLNode"), e); + } finally { + setInternalKettleVariables(); } - } } - return null; - } - /** - * Nr job entries. - * - * @return the int - */ - public int nrJobEntries() { - return jobcopies.size(); - } + /** + * Read shared objects. + * + * @return the shared objects + * @throws KettleException the kettle exception + */ + public SharedObjects readSharedObjects() throws KettleException { + // Extract the shared steps, connections, etc. using the SharedObjects + // class + // + String soFile = environmentSubstitute(sharedObjectsFile); + SharedObjects sharedObjects = new SharedObjects(soFile); + Map objectsMap = sharedObjects.getObjectsMap(); - /** - * Nr job hops. - * - * @return the int - */ - public int nrJobHops() { - return jobhops.size(); - } + // First read the databases... + // We read databases & slaves first because there might be dependencies + // that need to be resolved. 
+ // + for (SharedObjectInterface object : objectsMap.values()) { + if (object instanceof DatabaseMeta) { + DatabaseMeta databaseMeta = (DatabaseMeta) object; + databaseMeta.shareVariablesWith(this); + addOrReplaceDatabase(databaseMeta); + } else if (object instanceof SlaveServer) { + SlaveServer slaveServer = (SlaveServer) object; + slaveServer.shareVariablesWith(this); + addOrReplaceSlaveServer(slaveServer); + } + } - /** - * Gets the job hop. - * - * @param i the i - * @return the job hop - */ - public JobHopMeta getJobHop( int i ) { - return jobhops.get( i ); - } + return sharedObjects; + } - /** - * Gets the job entry. + /* + * (non-Javadoc) * - * @param i the i - * @return the job entry + * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() */ - public JobEntryCopy getJobEntry( int i ) { - return jobcopies.get( i ); - } + public void saveSharedObjects() throws KettleException { + try { + // First load all the shared objects... + String soFile = environmentSubstitute(sharedObjectsFile); + SharedObjects sharedObjects = new SharedObjects(soFile); + + // Now overwrite the objects in there + List shared = new ArrayList(); + shared.addAll(databases); + shared.addAll(slaveServers); + + // The databases connections... + for (int i = 0; i < shared.size(); i++) { + SharedObjectInterface sharedObject = (SharedObjectInterface) shared.get(i); + if (sharedObject.isShared()) { + sharedObjects.storeObject(sharedObject); + } + } - /** - * Adds the job entry. - * - * @param je the je - */ - public void addJobEntry( JobEntryCopy je ) { - jobcopies.add( je ); - je.setParentJobMeta( this ); - setChanged(); - } - - /** - * Adds the job hop. - * - * @param hi the hi - */ - public void addJobHop( JobHopMeta hi ) { - jobhops.add( hi ); - setChanged(); - } + // Save the objects + sharedObjects.saveToFile(); + } catch (Exception e) { + throw new KettleException("Unable to save shared ojects", e); + } + } - /** - * Adds the job entry. - * - * @param p the p - * @param si the si - */ - public void addJobEntry( int p, JobEntryCopy si ) { - jobcopies.add( p, si ); - changedEntries = true; - } + /** + * Gets the job entry copy. + * + * @param x the x + * @param y the y + * @param iconsize the iconsize + * @return the job entry copy + */ + public JobEntryCopy getJobEntryCopy(int x, int y, int iconsize) { + int i, s; + s = nrJobEntries(); + for (i = s - 1; i >= 0; i--) { + // Back to front because drawing goes from start to end + + JobEntryCopy je = getJobEntry(i); + Point p = je.getLocation(); + if (p != null) { + if (x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize) { + return je; + } + } + } + return null; + } - /** - * Adds the job hop. - * - * @param p the p - * @param hi the hi - */ - public void addJobHop( int p, JobHopMeta hi ) { - try { - jobhops.add( p, hi ); - } catch ( IndexOutOfBoundsException e ) { - jobhops.add( hi ); + /** + * Nr job entries. + * + * @return the int + */ + public int nrJobEntries() { + return jobcopies.size(); } - changedHops = true; - } - /** - * Removes the job entry. - * - * @param i the i - */ - public void removeJobEntry( int i ) { - JobEntryCopy deleted = jobcopies.remove( i ); - if ( deleted != null ) { - if ( deleted.getEntry() instanceof MissingEntry ) { - removeMissingEntry( (MissingEntry) deleted.getEntry() ); - } - } - setChanged(); - } - - /** - * Removes the job hop. - * - * @param i the i - */ - public void removeJobHop( int i ) { - jobhops.remove( i ); - setChanged(); - } - - /** - * Removes a hop from the transformation. 
Also marks that the - * transformation's hops have changed. - * - * @param hop The hop to remove from the list of hops - */ - public void removeJobHop( JobHopMeta hop ) { - jobhops.remove( hop ); - setChanged(); - } + /** + * Nr job hops. + * + * @return the int + */ + public int nrJobHops() { + return jobhops.size(); + } - /** - * Index of job hop. - * - * @param he the he - * @return the int - */ - public int indexOfJobHop( JobHopMeta he ) { - return jobhops.indexOf( he ); - } + /** + * Gets the job hop. + * + * @param i the i + * @return the job hop + */ + public JobHopMeta getJobHop(int i) { + return jobhops.get(i); + } - /** - * Index of job entry. - * - * @param ge the ge - * @return the int - */ - public int indexOfJobEntry( JobEntryCopy ge ) { - return jobcopies.indexOf( ge ); - } + /** + * Gets the job entry. + * + * @param i the i + * @return the job entry + */ + public JobEntryCopy getJobEntry(int i) { + return jobcopies.get(i); + } - /** - * Sets the job entry. - * - * @param idx the idx - * @param jec the jec - */ - public void setJobEntry( int idx, JobEntryCopy jec ) { - jobcopies.set( idx, jec ); - } + /** + * Adds the job entry. + * + * @param je the je + */ + public void addJobEntry(JobEntryCopy je) { + jobcopies.add(je); + je.setParentJobMeta(this); + setChanged(); + } - /** - * Find an existing JobEntryCopy by it's name and number - * - * @param name The name of the job entry copy - * @param nr The number of the job entry copy - * @return The JobEntryCopy or null if nothing was found! - */ - public JobEntryCopy findJobEntry( String name, int nr, boolean searchHiddenToo ) { - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy jec = getJobEntry( i ); - if ( jec.getName().equalsIgnoreCase( name ) && jec.getNr() == nr ) { - if ( searchHiddenToo || jec.isDrawn() ) { - return jec; + /** + * Adds the job hop. + * + * @param hi the hi + */ + public void addJobHop(JobHopMeta hi) { + jobhops.add(hi); + setChanged(); + } + + /** + * Adds the job entry. + * + * @param p the p + * @param si the si + */ + public void addJobEntry(int p, JobEntryCopy si) { + jobcopies.add(p, si); + changedEntries = true; + } + + /** + * Adds the job hop. + * + * @param p the p + * @param hi the hi + */ + public void addJobHop(int p, JobHopMeta hi) { + try { + jobhops.add(p, hi); + } catch (IndexOutOfBoundsException e) { + jobhops.add(hi); } - } + changedHops = true; } - return null; - } - /** - * Find job entry. - * - * @param full_name_nr the full_name_nr - * @return the job entry copy - */ - public JobEntryCopy findJobEntry( String full_name_nr ) { - int i; - for ( i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy jec = getJobEntry( i ); - JobEntryInterface je = jec.getEntry(); - if ( je.toString().equalsIgnoreCase( full_name_nr ) ) { - return jec; - } - } - return null; - } - - /** - * Find job hop. - * - * @param name the name - * @return the job hop meta - */ - public JobHopMeta findJobHop( String name ) { - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops + /** + * Removes the job entry. + * + * @param i the i + */ + public void removeJobEntry(int i) { + JobEntryCopy deleted = jobcopies.remove(i); + if (deleted != null) { + if (deleted.getEntry() instanceof MissingEntry) { + removeMissingEntry((MissingEntry) deleted.getEntry()); + } + } + setChanged(); + } - if ( hi.toString().equalsIgnoreCase( name ) ) { - return hi; - } + /** + * Removes the job hop. 
+ * + * @param i the i + */ + public void removeJobHop(int i) { + jobhops.remove(i); + setChanged(); } - return null; - } - /** - * Find job hop from. - * - * @param jge the jge - * @return the job hop meta - */ - public JobHopMeta findJobHopFrom( JobEntryCopy jge ) { - if ( jge != null ) { - for ( JobHopMeta hi : jobhops ) { + /** + * Removes a hop from the transformation. Also marks that the + * transformation's hops have changed. + * + * @param hop The hop to remove from the list of hops + */ + public void removeJobHop(JobHopMeta hop) { + jobhops.remove(hop); + setChanged(); + } + + /** + * Index of job hop. + * + * @param he the he + * @return the int + */ + public int indexOfJobHop(JobHopMeta he) { + return jobhops.indexOf(he); + } + + /** + * Index of job entry. + * + * @param ge the ge + * @return the int + */ + public int indexOfJobEntry(JobEntryCopy ge) { + return jobcopies.indexOf(ge); + } + + /** + * Sets the job entry. + * + * @param idx the idx + * @param jec the jec + */ + public void setJobEntry(int idx, JobEntryCopy jec) { + jobcopies.set(idx, jec); + } + + /** + * Find an existing JobEntryCopy by it's name and number + * + * @param name The name of the job entry copy + * @param nr The number of the job entry copy + * @return The JobEntryCopy or null if nothing was found! + */ + public JobEntryCopy findJobEntry(String name, int nr, boolean searchHiddenToo) { + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy jec = getJobEntry(i); + if (jec.getName().equalsIgnoreCase(name) && jec.getNr() == nr) { + if (searchHiddenToo || jec.isDrawn()) { + return jec; + } + } + } + return null; + } - // Return the first we find! + /** + * Find job entry. + * + * @param full_name_nr the full_name_nr + * @return the job entry copy + */ + public JobEntryCopy findJobEntry(String full_name_nr) { + int i; + for (i = 0; i < nrJobEntries(); i++) { + JobEntryCopy jec = getJobEntry(i); + JobEntryInterface je = jec.getEntry(); + if (je.toString().equalsIgnoreCase(full_name_nr)) { + return jec; + } + } + return null; + } + + /** + * Find job hop. + * + * @param name the name + * @return the job hop meta + */ + public JobHopMeta findJobHop(String name) { + for (JobHopMeta hi : jobhops) { + // Look at all the hops + + if (hi.toString().equalsIgnoreCase(name)) { + return hi; + } + } + return null; + } + + /** + * Find job hop from. + * + * @param jge the jge + * @return the job hop meta + */ + public JobHopMeta findJobHopFrom(JobEntryCopy jge) { + if (jge != null) { + for (JobHopMeta hi : jobhops) { + + // Return the first we find! + // + if (hi != null && (hi.getFromEntry() != null) && hi.getFromEntry().equals(jge)) { + return hi; + } + } + } + return null; + } + + /** + * Find job hop. + * + * @param from the from + * @param to the to + * @return the job hop meta + */ + public JobHopMeta findJobHop(JobEntryCopy from, JobEntryCopy to) { + return findJobHop(from, to, false); + } + + /** + * Find job hop. + * + * @param from the from + * @param to the to + * @param includeDisabled the include disabled + * @return the job hop meta + */ + public JobHopMeta findJobHop(JobEntryCopy from, JobEntryCopy to, boolean includeDisabled) { + for (JobHopMeta hi : jobhops) { + if (hi.isEnabled() || includeDisabled) { + if (hi != null && hi.getFromEntry() != null && hi.getToEntry() != null && hi.getFromEntry().equals(from) + && hi.getToEntry().equals(to)) { + return hi; + } + } + } + return null; + } + + /** + * Find job hop to. 
+ * + * @param jge the jge + * @return the job hop meta + */ + public JobHopMeta findJobHopTo(JobEntryCopy jge) { + for (JobHopMeta hi : jobhops) { + if (hi != null && hi.getToEntry() != null && hi.getToEntry().equals(jge)) { + // Return the first! + return hi; + } + } + return null; + } + + /** + * Find nr prev job entries. + * + * @param from the from + * @return the int + */ + public int findNrPrevJobEntries(JobEntryCopy from) { + return findNrPrevJobEntries(from, false); + } + + /** + * Find prev job entry. + * + * @param to the to + * @param nr the nr + * @return the job entry copy + */ + public JobEntryCopy findPrevJobEntry(JobEntryCopy to, int nr) { + return findPrevJobEntry(to, nr, false); + } + + /** + * Find nr prev job entries. + * + * @param to the to + * @param info the info + * @return the int + */ + public int findNrPrevJobEntries(JobEntryCopy to, boolean info) { + int count = 0; + + for (JobHopMeta hi : jobhops) { + // Look at all the hops + + if (hi.isEnabled() && hi.getToEntry().equals(to)) { + count++; + } + } + return count; + } + + /** + * Find prev job entry. + * + * @param to the to + * @param nr the nr + * @param info the info + * @return the job entry copy + */ + public JobEntryCopy findPrevJobEntry(JobEntryCopy to, int nr, boolean info) { + int count = 0; + + for (JobHopMeta hi : jobhops) { + // Look at all the hops + + if (hi.isEnabled() && hi.getToEntry().equals(to)) { + if (count == nr) { + return hi.getFromEntry(); + } + count++; + } + } + return null; + } + + /** + * Find nr next job entries. + * + * @param from the from + * @return the int + */ + public int findNrNextJobEntries(JobEntryCopy from) { + int count = 0; + for (JobHopMeta hi : jobhops) { + // Look at all the hops + + if (hi.isEnabled() && (hi.getFromEntry() != null) && hi.getFromEntry().equals(from)) { + count++; + } + } + return count; + } + + /** + * Find next job entry. + * + * @param from the from + * @param cnt the cnt + * @return the job entry copy + */ + public JobEntryCopy findNextJobEntry(JobEntryCopy from, int cnt) { + int count = 0; + + for (JobHopMeta hi : jobhops) { + // Look at all the hops + + if (hi.isEnabled() && (hi.getFromEntry() != null) && hi.getFromEntry().equals(from)) { + if (count == cnt) { + return hi.getToEntry(); + } + count++; + } + } + return null; + } + + /** + * Checks for loop. + * + * @param entry the entry + * @return true, if successful + */ + public boolean hasLoop(JobEntryCopy entry) { + clearLoopCache(); + return hasLoop(entry, null, true) || hasLoop(entry, null, false); + } + + /** + * Checks for loop. + * + * @param entry the entry + * @param lookup the lookup + * @return true, if successful + */ + public boolean hasLoop(JobEntryCopy entry, JobEntryCopy lookup, boolean info) { + String cacheKey = + entry.getName() + " - " + (lookup != null ? lookup.getName() : "") + " - " + (info ? "true" : "false"); + + Boolean loop = loopCache.get(cacheKey); + if (loop != null) { + return loop.booleanValue(); + } + + boolean hasLoop = false; + + int nr = findNrPrevJobEntries(entry, info); + for (int i = 0; i < nr && !hasLoop; i++) { + JobEntryCopy prevJobMeta = findPrevJobEntry(entry, i, info); + if (prevJobMeta != null) { + if (prevJobMeta.equals(entry)) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if (prevJobMeta.equals(lookup)) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if (hasLoop(prevJobMeta, lookup == null ? 
entry : lookup, info)) { + hasLoop = true; + break; // no need to check more but caching this one below + } + } + } + // Store in the cache... // - if ( hi != null && ( hi.getFromEntry() != null ) && hi.getFromEntry().equals( jge ) ) { - return hi; + loopCache.put(cacheKey, Boolean.valueOf(hasLoop)); + return hasLoop; + } + + /** + * Clears the loop cache. + */ + private void clearLoopCache() { + loopCache.clear(); + } + + /** + * Checks if is entry used in hops. + * + * @param jge the jge + * @return true, if is entry used in hops + */ + public boolean isEntryUsedInHops(JobEntryCopy jge) { + JobHopMeta fr = findJobHopFrom(jge); + JobHopMeta to = findJobHopTo(jge); + if (fr != null || to != null) { + return true; } - } + return false; } - return null; - } - /** - * Find job hop. - * - * @param from the from - * @param to the to - * @return the job hop meta - */ - public JobHopMeta findJobHop( JobEntryCopy from, JobEntryCopy to ) { - return findJobHop( from, to, false ); - } + /** + * Count entries. + * + * @param name the name + * @return the int + */ + public int countEntries(String name) { + int count = 0; + int i; + for (i = 0; i < nrJobEntries(); i++) { + // Look at all the hops; + + JobEntryCopy je = getJobEntry(i); + if (je.getName().equalsIgnoreCase(name)) { + count++; + } + } + return count; + } - /** - * Find job hop. - * - * @param from the from - * @param to the to - * @param includeDisabled the include disabled - * @return the job hop meta - */ - public JobHopMeta findJobHop( JobEntryCopy from, JobEntryCopy to, boolean includeDisabled ) { - for ( JobHopMeta hi : jobhops ) { - if ( hi.isEnabled() || includeDisabled ) { - if ( hi != null && hi.getFromEntry() != null && hi.getToEntry() != null && hi.getFromEntry().equals( from ) - && hi.getToEntry().equals( to ) ) { - return hi; + /** + * Find unused nr. + * + * @param name the name + * @return the int + */ + public int findUnusedNr(String name) { + int nr = 1; + JobEntryCopy je = findJobEntry(name, nr, true); + while (je != null) { + nr++; + // log.logDebug("findUnusedNr()", "Trying unused nr: "+nr); + je = findJobEntry(name, nr, true); } - } + return nr; } - return null; - } - /** - * Find job hop to. - * - * @param jge the jge - * @return the job hop meta - */ - public JobHopMeta findJobHopTo( JobEntryCopy jge ) { - for ( JobHopMeta hi : jobhops ) { - if ( hi != null && hi.getToEntry() != null && hi.getToEntry().equals( jge ) ) { - // Return the first! - return hi; - } - } - return null; - } - - /** - * Find nr prev job entries. - * - * @param from the from - * @return the int - */ - public int findNrPrevJobEntries( JobEntryCopy from ) { - return findNrPrevJobEntries( from, false ); - } + /** + * Find max nr. + * + * @param name the name + * @return the int + */ + public int findMaxNr(String name) { + int max = 0; + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy je = getJobEntry(i); + if (je.getName().equalsIgnoreCase(name)) { + if (je.getNr() > max) { + max = je.getNr(); + } + } + } + return max; + } - /** - * Find prev job entry. - * - * @param to the to - * @param nr the nr - * @return the job entry copy - */ - public JobEntryCopy findPrevJobEntry( JobEntryCopy to, int nr ) { - return findPrevJobEntry( to, nr, false ); - } + /** + * Proposes an alternative job entry name when the original already exists... + * + * @param entryname The job entry name to find an alternative for.. + * @return The alternative stepname. 
+ */ + public String getAlternativeJobentryName(String entryname) { + String newname = entryname; + JobEntryCopy jec = findJobEntry(newname); + int nr = 1; + while (jec != null) { + nr++; + newname = entryname + " " + nr; + jec = findJobEntry(newname); + } - /** - * Find nr prev job entries. - * - * @param to the to - * @param info the info - * @return the int - */ - public int findNrPrevJobEntries( JobEntryCopy to, boolean info ) { - int count = 0; + return newname; + } + + /** + * Gets the all job graph entries. + * + * @param name the name + * @return the all job graph entries + */ + public JobEntryCopy[] getAllJobGraphEntries(String name) { + int count = 0; + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy je = getJobEntry(i); + if (je.getName().equalsIgnoreCase(name)) { + count++; + } + } + JobEntryCopy[] retval = new JobEntryCopy[count]; + + count = 0; + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy je = getJobEntry(i); + if (je.getName().equalsIgnoreCase(name)) { + retval[count] = je; + count++; + } + } + return retval; + } + + /** + * Gets the all job hops using. + * + * @param name the name + * @return the all job hops using + */ + public JobHopMeta[] getAllJobHopsUsing(String name) { + List hops = new ArrayList(); + + for (JobHopMeta hi : jobhops) { + // Look at all the hops + + if (hi.getFromEntry() != null && hi.getToEntry() != null) { + if (hi.getFromEntry().getName().equalsIgnoreCase(name) || hi.getToEntry().getName() + .equalsIgnoreCase(name)) { + hops.add(hi); + } + } + } + return hops.toArray(new JobHopMeta[hops.size()]); + } + + public boolean isPathExist(JobEntryInterface from, JobEntryInterface to) { + for (JobHopMeta hi : jobhops) { + if (hi.getFromEntry() != null && hi.getToEntry() != null) { + if (hi.getFromEntry().getName().equalsIgnoreCase(from.getName())) { + if (hi.getToEntry().getName().equalsIgnoreCase(to.getName())) { + return true; + } + if (isPathExist(hi.getToEntry().getEntry(), to)) { + return true; + } + } + } + } - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops + return false; + } - if ( hi.isEnabled() && hi.getToEntry().equals( to ) ) { - count++; - } + /** + * Select all. + */ + public void selectAll() { + int i; + for (i = 0; i < nrJobEntries(); i++) { + JobEntryCopy ce = getJobEntry(i); + ce.setSelected(true); + } + for (i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + ni.setSelected(true); + } + setChanged(); + notifyObservers("refreshGraph"); } - return count; - } - /** - * Find prev job entry. - * - * @param to the to - * @param nr the nr - * @param info the info - * @return the job entry copy - */ - public JobEntryCopy findPrevJobEntry( JobEntryCopy to, int nr, boolean info ) { - int count = 0; + /** + * Unselect all. + */ + public void unselectAll() { + int i; + for (i = 0; i < nrJobEntries(); i++) { + JobEntryCopy ce = getJobEntry(i); + ce.setSelected(false); + } + for (i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + ni.setSelected(false); + } + } + + /** + * Gets the maximum. 
+ * + * @return the maximum + */ + public Point getMaximum() { + int maxx = 0, maxy = 0; + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy entry = getJobEntry(i); + Point loc = entry.getLocation(); + if (loc.x > maxx) { + maxx = loc.x; + } + if (loc.y > maxy) { + maxy = loc.y; + } + } + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + Point loc = ni.getLocation(); + if (loc.x + ni.width > maxx) { + maxx = loc.x + ni.width; + } + if (loc.y + ni.height > maxy) { + maxy = loc.y + ni.height; + } + } - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops + return new Point(maxx + 100, maxy + 100); + } + + /** + * Get the minimum point on the canvas of a job + * + * @return Minimum coordinate of a step in the job + */ + public Point getMinimum() { + int minx = Integer.MAX_VALUE; + int miny = Integer.MAX_VALUE; + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy jobEntryCopy = getJobEntry(i); + Point loc = jobEntryCopy.getLocation(); + if (loc.x < minx) { + minx = loc.x; + } + if (loc.y < miny) { + miny = loc.y; + } + } + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta notePadMeta = getNote(i); + Point loc = notePadMeta.getLocation(); + if (loc.x < minx) { + minx = loc.x; + } + if (loc.y < miny) { + miny = loc.y; + } + } + + if (minx > 20) { + minx -= 20; + } else { + minx = 0; + } + if (miny > 20) { + miny -= 20; + } else { + miny = 0; + } + + return new Point(minx, miny); + } + + /** + * Gets the selected locations. + * + * @return the selected locations + */ + public Point[] getSelectedLocations() { + List selectedEntries = getSelectedEntries(); + Point[] retval = new Point[selectedEntries.size()]; + for (int i = 0; i < retval.length; i++) { + JobEntryCopy si = selectedEntries.get(i); + Point p = si.getLocation(); + retval[i] = new Point(p.x, p.y); // explicit copy of location + } + return retval; + } + + /** + * Get all the selected note locations + * + * @return The selected step and notes locations. + */ + public Point[] getSelectedNoteLocations() { + List points = new ArrayList(); + + for (NotePadMeta ni : getSelectedNotes()) { + Point p = ni.getLocation(); + points.add(new Point(p.x, p.y)); // explicit copy of location + } + + return points.toArray(new Point[points.size()]); + } + + /** + * Gets the selected entries. + * + * @return the selected entries + */ + public List getSelectedEntries() { + List selection = new ArrayList(); + for (JobEntryCopy je : jobcopies) { + if (je.isSelected()) { + selection.add(je); + } + } + return selection; + } + + /** + * Gets the entry indexes. + * + * @param entries the entries + * @return the entry indexes + */ + public int[] getEntryIndexes(List entries) { + int[] retval = new int[entries.size()]; + + for (int i = 0; i < entries.size(); i++) { + retval[i] = indexOfJobEntry(entries.get(i)); + } + + return retval; + } + + /** + * Find start. + * + * @return the job entry copy + */ + public JobEntryCopy findStart() { + for (int i = 0; i < nrJobEntries(); i++) { + if (getJobEntry(i).isStart()) { + return getJobEntry(i); + } + } + return null; + } + + /** + * Gets a textual representation of the job. If its name has been set, it will be returned, otherwise the classname is + * returned. + * + * @return the textual representation of the job. 
+ */ + public String toString() { + if (!Const.isEmpty(filename)) { + if (Const.isEmpty(name)) { + return filename; + } else { + return filename + " : " + name; + } + } - if ( hi.isEnabled() && hi.getToEntry().equals( to ) ) { - if ( count == nr ) { - return hi.getFromEntry(); + if (name != null) { + if (directory != null) { + String path = directory.getPath(); + if (path.endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + return path + name; + } else { + return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name; + } + } else { + return name; + } + } else { + return JobMeta.class.getName(); } - count++; - } } - return null; - } - - /** - * Find nr next job entries. - * - * @param from the from - * @return the int - */ - public int findNrNextJobEntries( JobEntryCopy from ) { - int count = 0; - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops - if ( hi.isEnabled() && ( hi.getFromEntry() != null ) && hi.getFromEntry().equals( from ) ) { - count++; - } + /** + * Gets the boolean value of batch id passed. + * + * @return Returns the batchIdPassed. + */ + public boolean isBatchIdPassed() { + return batchIdPassed; } - return count; - } - /** - * Find next job entry. - * - * @param from the from - * @param cnt the cnt - * @return the job entry copy - */ - public JobEntryCopy findNextJobEntry( JobEntryCopy from, int cnt ) { - int count = 0; + /** + * Sets the batch id passed. + * + * @param batchIdPassed The batchIdPassed to set. + */ + public void setBatchIdPassed(boolean batchIdPassed) { + this.batchIdPassed = batchIdPassed; + } - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops + public List getSQLStatements(Repository repository, ProgressMonitorListener monitor) + throws KettleException { + return getSQLStatements(repository, null, monitor); + } - if ( hi.isEnabled() && ( hi.getFromEntry() != null ) && hi.getFromEntry().equals( from ) ) { - if ( count == cnt ) { - return hi.getToEntry(); + /** + * Builds a list of all the SQL statements that this transformation needs in order to work properly. + * + * @return An ArrayList of SQLStatement objects. + */ + public List getSQLStatements(Repository repository, IMetaStore metaStore, + ProgressMonitorListener monitor) throws KettleException { + if (monitor != null) { + monitor + .beginTask(BaseMessages.getString(PKG, "JobMeta.Monitor.GettingSQLNeededForThisJob"), nrJobEntries() + 1); } - count++; - } - } - return null; - } + List stats = new ArrayList(); - /** - * Checks for loop. - * - * @param entry the entry - * @return true, if successful - */ - public boolean hasLoop( JobEntryCopy entry ) { - clearLoopCache(); - return hasLoop( entry, null, true ) || hasLoop( entry, null, false ); - } + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy copy = getJobEntry(i); + if (monitor != null) { + monitor.subTask(BaseMessages.getString(PKG, "JobMeta.Monitor.GettingSQLForJobEntryCopy") + copy + "]"); + } + stats.addAll(copy.getEntry().getSQLStatements(repository, metaStore, this)); + stats.addAll(compatibleGetEntrySQLStatements(copy.getEntry(), repository)); + stats.addAll(compatibleGetEntrySQLStatements(copy.getEntry(), repository, this)); + if (monitor != null) { + monitor.worked(1); + } + } - /** - * Checks for loop. - * - * @param entry the entry - * @param lookup the lookup - * @return true, if successful - */ - public boolean hasLoop( JobEntryCopy entry, JobEntryCopy lookup, boolean info ) { - String cacheKey = - entry.getName() + " - " + ( lookup != null ? lookup.getName() : "" ) + " - " + ( info ? 
"true" : "false" ); - - Boolean loop = loopCache.get( cacheKey ); - if ( loop != null ) { - return loop.booleanValue(); - } - - boolean hasLoop = false; - - int nr = findNrPrevJobEntries( entry, info ); - for ( int i = 0; i < nr && !hasLoop; i++ ) { - JobEntryCopy prevJobMeta = findPrevJobEntry( entry, i, info ); - if ( prevJobMeta != null ) { - if ( prevJobMeta.equals( entry ) ) { - hasLoop = true; - break; // no need to check more but caching this one below - } else if ( prevJobMeta.equals( lookup ) ) { - hasLoop = true; - break; // no need to check more but caching this one below - } else if ( hasLoop( prevJobMeta, lookup == null ? entry : lookup, info ) ) { - hasLoop = true; - break; // no need to check more but caching this one below - } - } - } - // Store in the cache... - // - loopCache.put( cacheKey, Boolean.valueOf( hasLoop ) ); - return hasLoop; - } - - /** - * Clears the loop cache. - */ - private void clearLoopCache() { - loopCache.clear(); - } + // Also check the sql for the logtable... + if (monitor != null) { + monitor.subTask(BaseMessages.getString(PKG, "JobMeta.Monitor.GettingSQLStatementsForJobLogTables")); + } + if (jobLogTable.getDatabaseMeta() != null && !Const.isEmpty(jobLogTable.getTableName())) { + Database db = new Database(this, jobLogTable.getDatabaseMeta()); + try { + db.connect(); + RowMetaInterface fields = jobLogTable.getLogRecord(LogStatus.START, null, null).getRowMeta(); + String sql = db.getDDL(jobLogTable.getTableName(), fields); + if (sql != null && sql.length() > 0) { + SQLStatement stat = new SQLStatement(BaseMessages.getString(PKG, "JobMeta.SQLFeedback.ThisJob"), + jobLogTable.getDatabaseMeta(), sql); + stats.add(stat); + } + } catch (KettleDatabaseException dbe) { + SQLStatement stat = new SQLStatement(BaseMessages.getString(PKG, "JobMeta.SQLFeedback.ThisJob"), + jobLogTable.getDatabaseMeta(), null); + stat.setError( + BaseMessages.getString(PKG, "JobMeta.SQLFeedback.ErrorObtainingJobLogTableInfo") + dbe.getMessage()); + stats.add(stat); + } finally { + db.disconnect(); + } + } + if (monitor != null) { + monitor.worked(1); + } + if (monitor != null) { + monitor.done(); + } - /** - * Checks if is entry used in hops. - * - * @param jge the jge - * @return true, if is entry used in hops - */ - public boolean isEntryUsedInHops( JobEntryCopy jge ) { - JobHopMeta fr = findJobHopFrom( jge ); - JobHopMeta to = findJobHopTo( jge ); - if ( fr != null || to != null ) { - return true; + return stats; } - return false; - } - /** - * Count entries. - * - * @param name the name - * @return the int - */ - public int countEntries( String name ) { - int count = 0; - int i; - for ( i = 0; i < nrJobEntries(); i++ ) { - // Look at all the hops; - - JobEntryCopy je = getJobEntry( i ); - if ( je.getName().equalsIgnoreCase( name ) ) { - count++; - } - } - return count; - } - - /** - * Find unused nr. - * - * @param name the name - * @return the int - */ - public int findUnusedNr( String name ) { - int nr = 1; - JobEntryCopy je = findJobEntry( name, nr, true ); - while ( je != null ) { - nr++; - // log.logDebug("findUnusedNr()", "Trying unused nr: "+nr); - je = findJobEntry( name, nr, true ); - } - return nr; - } - - /** - * Find max nr. 
- * - * @param name the name - * @return the int - */ - public int findMaxNr( String name ) { - int max = 0; - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy je = getJobEntry( i ); - if ( je.getName().equalsIgnoreCase( name ) ) { - if ( je.getNr() > max ) { - max = je.getNr(); - } - } + @SuppressWarnings("deprecation") + private Collection compatibleGetEntrySQLStatements(JobEntryInterface entry, + Repository repository, VariableSpace variableSpace) throws KettleException { + return entry.getSQLStatements(repository, variableSpace); } - return max; - } - /** - * Proposes an alternative job entry name when the original already exists... - * - * @param entryname The job entry name to find an alternative for.. - * @return The alternative stepname. - */ - public String getAlternativeJobentryName( String entryname ) { - String newname = entryname; - JobEntryCopy jec = findJobEntry( newname ); - int nr = 1; - while ( jec != null ) { - nr++; - newname = entryname + " " + nr; - jec = findJobEntry( newname ); + @SuppressWarnings("deprecation") + private Collection compatibleGetEntrySQLStatements(JobEntryInterface entry, + Repository repository) throws KettleException { + return entry.getSQLStatements(repository); } - return newname; - } - - /** - * Gets the all job graph entries. - * - * @param name the name - * @return the all job graph entries - */ - public JobEntryCopy[] getAllJobGraphEntries( String name ) { - int count = 0; - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy je = getJobEntry( i ); - if ( je.getName().equalsIgnoreCase( name ) ) { - count++; - } - } - JobEntryCopy[] retval = new JobEntryCopy[count]; - - count = 0; - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy je = getJobEntry( i ); - if ( je.getName().equalsIgnoreCase( name ) ) { - retval[count] = je; - count++; - } - } - return retval; - } - - /** - * Gets the all job hops using. - * - * @param name the name - * @return the all job hops using - */ - public JobHopMeta[] getAllJobHopsUsing( String name ) { - List hops = new ArrayList(); + /** + * Gets the arguments used for this job. + * + * @return Returns the arguments. + * @deprecated Moved to the Job class + */ + @Deprecated + public String[] getArguments() { + return arguments; + } - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops + /** + * Sets the arguments. + * + * @param arguments The arguments to set. + * @deprecated moved to the job class + */ + @Deprecated + public void setArguments(String[] arguments) { + this.arguments = arguments; + } - if ( hi.getFromEntry() != null && hi.getToEntry() != null ) { - if ( hi.getFromEntry().getName().equalsIgnoreCase( name ) || hi.getToEntry().getName() - .equalsIgnoreCase( name ) ) { - hops.add( hi ); + /** + * Get a list of all the strings used in this job. + * + * @return A list of StringSearchResult with strings used in the job + */ + public List getStringList(boolean searchSteps, boolean searchDatabases, boolean searchNotes) { + List stringList = new ArrayList(); + + if (searchSteps) { + // Loop over all steps in the transformation and see what the used + // vars are... 
+ for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy entryMeta = getJobEntry(i); + stringList.add(new StringSearchResult(entryMeta.getName(), entryMeta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.JobEntryName"))); + if (entryMeta.getDescription() != null) { + stringList.add(new StringSearchResult(entryMeta.getDescription(), entryMeta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.JobEntryDescription"))); + } + JobEntryInterface metaInterface = entryMeta.getEntry(); + StringSearcher.findMetaData(metaInterface, 1, stringList, entryMeta, this); + } } - } - } - return hops.toArray( new JobHopMeta[hops.size()] ); - } - public boolean isPathExist( JobEntryInterface from, JobEntryInterface to ) { - for ( JobHopMeta hi : jobhops ) { - if ( hi.getFromEntry() != null && hi.getToEntry() != null ) { - if ( hi.getFromEntry().getName().equalsIgnoreCase( from.getName() ) ) { - if ( hi.getToEntry().getName().equalsIgnoreCase( to.getName() ) ) { - return true; - } - if ( isPathExist( hi.getToEntry().getEntry(), to ) ) { - return true; - } + // Loop over all steps in the transformation and see what the used vars + // are... + if (searchDatabases) { + for (int i = 0; i < nrDatabases(); i++) { + DatabaseMeta meta = getDatabase(i); + stringList.add(new StringSearchResult(meta.getName(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabaseConnectionName"))); + if (meta.getHostname() != null) { + stringList.add(new StringSearchResult(meta.getHostname(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabaseHostName"))); + } + if (meta.getDatabaseName() != null) { + stringList.add(new StringSearchResult(meta.getDatabaseName(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabaseName"))); + } + if (meta.getUsername() != null) { + stringList.add(new StringSearchResult(meta.getUsername(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabaseUsername"))); + } + if (meta.getPluginId() != null) { + stringList.add(new StringSearchResult(meta.getPluginId(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabaseTypeDescription"))); + } + if (meta.getDatabasePortNumberString() != null) { + stringList.add(new StringSearchResult(meta.getDatabasePortNumberString(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabasePort"))); + } + if (meta.getServername() != null) { + stringList.add(new StringSearchResult(meta.getServername(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabaseServer"))); + } + // if ( includePasswords ) + // { + if (meta.getPassword() != null) { + stringList.add(new StringSearchResult(meta.getPassword(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.DatabasePassword"))); + // } + } + } } - } - } - return false; - } + // Loop over all steps in the transformation and see what the used vars + // are... + if (searchNotes) { + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta meta = getNote(i); + if (meta.getNote() != null) { + stringList.add(new StringSearchResult(meta.getNote(), meta, this, + BaseMessages.getString(PKG, "JobMeta.SearchMetadata.NotepadText"))); + } + } + } - /** - * Select all. 
- */ - public void selectAll() { - int i; - for ( i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy ce = getJobEntry( i ); - ce.setSelected( true ); - } - for ( i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - ni.setSelected( true ); - } - setChanged(); - notifyObservers( "refreshGraph" ); - } - - /** - * Unselect all. - */ - public void unselectAll() { - int i; - for ( i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy ce = getJobEntry( i ); - ce.setSelected( false ); + return stringList; } - for ( i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - ni.setSelected( false ); - } - } - - /** - * Gets the maximum. - * - * @return the maximum - */ - public Point getMaximum() { - int maxx = 0, maxy = 0; - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy entry = getJobEntry( i ); - Point loc = entry.getLocation(); - if ( loc.x > maxx ) { - maxx = loc.x; - } - if ( loc.y > maxy ) { - maxy = loc.y; - } - } - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - Point loc = ni.getLocation(); - if ( loc.x + ni.width > maxx ) { - maxx = loc.x + ni.width; - } - if ( loc.y + ni.height > maxy ) { - maxy = loc.y + ni.height; - } - } - - return new Point( maxx + 100, maxy + 100 ); - } - - /** - * Get the minimum point on the canvas of a job - * - * @return Minimum coordinate of a step in the job - */ - public Point getMinimum() { - int minx = Integer.MAX_VALUE; - int miny = Integer.MAX_VALUE; - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy jobEntryCopy = getJobEntry( i ); - Point loc = jobEntryCopy.getLocation(); - if ( loc.x < minx ) { - minx = loc.x; - } - if ( loc.y < miny ) { - miny = loc.y; - } - } - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta notePadMeta = getNote( i ); - Point loc = notePadMeta.getLocation(); - if ( loc.x < minx ) { - minx = loc.x; - } - if ( loc.y < miny ) { - miny = loc.y; - } - } - - if ( minx > 20 ) { - minx -= 20; - } else { - minx = 0; - } - if ( miny > 20 ) { - miny -= 20; - } else { - miny = 0; - } - - return new Point( minx, miny ); - } - - /** - * Gets the selected locations. - * - * @return the selected locations - */ - public Point[] getSelectedLocations() { - List selectedEntries = getSelectedEntries(); - Point[] retval = new Point[selectedEntries.size()]; - for ( int i = 0; i < retval.length; i++ ) { - JobEntryCopy si = selectedEntries.get( i ); - Point p = si.getLocation(); - retval[i] = new Point( p.x, p.y ); // explicit copy of location - } - return retval; - } - - /** - * Get all the selected note locations - * - * @return The selected step and notes locations. - */ - public Point[] getSelectedNoteLocations() { - List points = new ArrayList(); - for ( NotePadMeta ni : getSelectedNotes() ) { - Point p = ni.getLocation(); - points.add( new Point( p.x, p.y ) ); // explicit copy of location - } + /** + * Gets the used variables. + * + * @return the used variables + */ + public List getUsedVariables() { + // Get the list of Strings. + List stringList = getStringList(true, true, false); - return points.toArray( new Point[points.size()] ); - } + List varList = new ArrayList(); - /** - * Gets the selected entries. - * - * @return the selected entries - */ - public List getSelectedEntries() { - List selection = new ArrayList(); - for ( JobEntryCopy je : jobcopies ) { - if ( je.isSelected() ) { - selection.add( je ); - } - } - return selection; - } - - /** - * Gets the entry indexes. 
- * - * @param entries the entries - * @return the entry indexes - */ - public int[] getEntryIndexes( List entries ) { - int[] retval = new int[entries.size()]; + // Look around in the strings, see what we find... + for (StringSearchResult result : stringList) { + StringUtil.getUsedVariables(result.getString(), varList, false); + } - for ( int i = 0; i < entries.size(); i++ ) { - retval[i] = indexOfJobEntry( entries.get( i ) ); + return varList; } - return retval; - } + /** + * Have job entries changed. + * + * @return true, if successful + */ + public boolean haveJobEntriesChanged() { + if (changedEntries) { + return true; + } - /** - * Find start. - * - * @return the job entry copy - */ - public JobEntryCopy findStart() { - for ( int i = 0; i < nrJobEntries(); i++ ) { - if ( getJobEntry( i ).isStart() ) { - return getJobEntry( i ); - } - } - return null; - } - - /** - * Gets a textual representation of the job. If its name has been set, it will be returned, otherwise the classname is - * returned. - * - * @return the textual representation of the job. - */ - public String toString() { - if ( !Const.isEmpty( filename ) ) { - if ( Const.isEmpty( name ) ) { - return filename; - } else { - return filename + " : " + name; - } - } - - if ( name != null ) { - if ( directory != null ) { - String path = directory.getPath(); - if ( path.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - return path + name; - } else { - return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name; + for (int i = 0; i < nrJobEntries(); i++) { + JobEntryCopy entry = getJobEntry(i); + if (entry.hasChanged()) { + return true; + } } - } else { - return name; - } - } else { - return JobMeta.class.getName(); + return false; } - } - - /** - * Gets the boolean value of batch id passed. - * - * @return Returns the batchIdPassed. - */ - public boolean isBatchIdPassed() { - return batchIdPassed; - } - - /** - * Sets the batch id passed. - * - * @param batchIdPassed The batchIdPassed to set. - */ - public void setBatchIdPassed( boolean batchIdPassed ) { - this.batchIdPassed = batchIdPassed; - } - - public List getSQLStatements( Repository repository, ProgressMonitorListener monitor ) - throws KettleException { - return getSQLStatements( repository, null, monitor ); - } - - /** - * Builds a list of all the SQL statements that this transformation needs in order to work properly. - * - * @return An ArrayList of SQLStatement objects. - */ - public List getSQLStatements( Repository repository, IMetaStore metaStore, - ProgressMonitorListener monitor ) throws KettleException { - if ( monitor != null ) { - monitor - .beginTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLNeededForThisJob" ), nrJobEntries() + 1 ); - } - List stats = new ArrayList(); - - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy copy = getJobEntry( i ); - if ( monitor != null ) { - monitor.subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLForJobEntryCopy" ) + copy + "]" ); - } - stats.addAll( copy.getEntry().getSQLStatements( repository, metaStore, this ) ); - stats.addAll( compatibleGetEntrySQLStatements( copy.getEntry(), repository ) ); - stats.addAll( compatibleGetEntrySQLStatements( copy.getEntry(), repository, this ) ); - if ( monitor != null ) { - monitor.worked( 1 ); - } - } - - // Also check the sql for the logtable... 
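For orientation, here is a minimal, hypothetical sketch of driving the getUsedVariables() scan added in the hunk above. Only JobMeta.getUsedVariables(), getStringList(...) and StringUtil.getUsedVariables(...) come from the patched code; the KettleEnvironment bootstrap and the job file name are illustrative assumptions, not part of this patch.

    import org.pentaho.di.core.KettleEnvironment;
    import org.pentaho.di.job.JobMeta;

    public class UsedVariablesDemo {
        public static void main(String[] args) throws Exception {
            KettleEnvironment.init();                          // bootstrap Kettle plugins and VFS
            JobMeta jobMeta = new JobMeta("my_job.kjb", null); // hypothetical job file, no repository
            // getUsedVariables() gathers entry names/descriptions, database settings and notes
            // via getStringList(true, true, false), then extracts every ${VAR}-style token found.
            for (String usedVariable : jobMeta.getUsedVariables()) {
                System.out.println("job references variable: " + usedVariable);
            }
        }
    }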
- if ( monitor != null ) { - monitor.subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.GettingSQLStatementsForJobLogTables" ) ); - } - if ( jobLogTable.getDatabaseMeta() != null && !Const.isEmpty( jobLogTable.getTableName() ) ) { - Database db = new Database( this, jobLogTable.getDatabaseMeta() ); - try { - db.connect(); - RowMetaInterface fields = jobLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); - String sql = db.getDDL( jobLogTable.getTableName(), fields ); - if ( sql != null && sql.length() > 0 ) { - SQLStatement stat = new SQLStatement( BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), - jobLogTable.getDatabaseMeta(), sql ); - stats.add( stat ); - } - } catch ( KettleDatabaseException dbe ) { - SQLStatement stat = new SQLStatement( BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ThisJob" ), - jobLogTable.getDatabaseMeta(), null ); - stat.setError( - BaseMessages.getString( PKG, "JobMeta.SQLFeedback.ErrorObtainingJobLogTableInfo" ) + dbe.getMessage() ); - stats.add( stat ); - } finally { - db.disconnect(); - } - } - if ( monitor != null ) { - monitor.worked( 1 ); - } - if ( monitor != null ) { - monitor.done(); - } - - return stats; - } - - @SuppressWarnings( "deprecation" ) - private Collection compatibleGetEntrySQLStatements( JobEntryInterface entry, - Repository repository, VariableSpace variableSpace ) throws KettleException { - return entry.getSQLStatements( repository, variableSpace ); - } - - @SuppressWarnings( "deprecation" ) - private Collection compatibleGetEntrySQLStatements( JobEntryInterface entry, - Repository repository ) throws KettleException { - return entry.getSQLStatements( repository ); - } - - /** - * Gets the arguments used for this job. - * - * @return Returns the arguments. - * @deprecated Moved to the Job class - */ - @Deprecated - public String[] getArguments() { - return arguments; - } - /** - * Sets the arguments. - * - * @param arguments The arguments to set. - * @deprecated moved to the job class - */ - @Deprecated - public void setArguments( String[] arguments ) { - this.arguments = arguments; - } + /** + * Have job hops changed. + * + * @return true, if successful + */ + public boolean haveJobHopsChanged() { + if (changedHops) { + return true; + } - /** - * Get a list of all the strings used in this job. - * - * @return A list of StringSearchResult with strings used in the job - */ - public List getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes ) { - List stringList = new ArrayList(); - - if ( searchSteps ) { - // Loop over all steps in the transformation and see what the used - // vars are... - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy entryMeta = getJobEntry( i ); - stringList.add( new StringSearchResult( entryMeta.getName(), entryMeta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.JobEntryName" ) ) ); - if ( entryMeta.getDescription() != null ) { - stringList.add( new StringSearchResult( entryMeta.getDescription(), entryMeta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.JobEntryDescription" ) ) ); - } - JobEntryInterface metaInterface = entryMeta.getEntry(); - StringSearcher.findMetaData( metaInterface, 1, stringList, entryMeta, this ); - } - } - - // Loop over all steps in the transformation and see what the used vars - // are... 
- if ( searchDatabases ) { - for ( int i = 0; i < nrDatabases(); i++ ) { - DatabaseMeta meta = getDatabase( i ); - stringList.add( new StringSearchResult( meta.getName(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseConnectionName" ) ) ); - if ( meta.getHostname() != null ) { - stringList.add( new StringSearchResult( meta.getHostname(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseHostName" ) ) ); - } - if ( meta.getDatabaseName() != null ) { - stringList.add( new StringSearchResult( meta.getDatabaseName(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseName" ) ) ); - } - if ( meta.getUsername() != null ) { - stringList.add( new StringSearchResult( meta.getUsername(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseUsername" ) ) ); - } - if ( meta.getPluginId() != null ) { - stringList.add( new StringSearchResult( meta.getPluginId(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseTypeDescription" ) ) ); - } - if ( meta.getDatabasePortNumberString() != null ) { - stringList.add( new StringSearchResult( meta.getDatabasePortNumberString(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabasePort" ) ) ); - } - if ( meta.getServername() != null ) { - stringList.add( new StringSearchResult( meta.getServername(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabaseServer" ) ) ); - } - // if ( includePasswords ) - // { - if ( meta.getPassword() != null ) { - stringList.add( new StringSearchResult( meta.getPassword(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.DatabasePassword" ) ) ); - // } - } - } - } - - // Loop over all steps in the transformation and see what the used vars - // are... - if ( searchNotes ) { - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta meta = getNote( i ); - if ( meta.getNote() != null ) { - stringList.add( new StringSearchResult( meta.getNote(), meta, this, - BaseMessages.getString( PKG, "JobMeta.SearchMetadata.NotepadText" ) ) ); - } - } - } - - return stringList; - } - - /** - * Gets the used variables. - * - * @return the used variables - */ - public List getUsedVariables() { - // Get the list of Strings. - List stringList = getStringList( true, true, false ); + for (JobHopMeta hi : jobhops) { + // Look at all the hops - List varList = new ArrayList(); + if (hi.hasChanged()) { + return true; + } + } + return false; + } - // Look around in the strings, see what we find... - for ( StringSearchResult result : stringList ) { - StringUtil.getUsedVariables( result.getString(), varList, false ); + /** + * Gets the version of the job. + * + * @return The version of the job + */ + public String getJobversion() { + return jobVersion; } - return varList; - } + /** + * Gets the status of the job. + * + * @return the status of the job + */ + public int getJobstatus() { + return jobStatus; + } - /** - * Have job entries changed. - * - * @return true, if successful - */ - public boolean haveJobEntriesChanged() { - if ( changedEntries ) { - return true; + /** + * Set the version of the job. + * + * @param jobVersion The new version description of the job + */ + public void setJobversion(String jobVersion) { + this.jobVersion = jobVersion; } - for ( int i = 0; i < nrJobEntries(); i++ ) { - JobEntryCopy entry = getJobEntry( i ); - if ( entry.hasChanged() ) { - return true; - } + /** + * Set the status of the job. 
+ * + * @param jobStatus The new status description of the job + */ + public void setJobstatus(int jobStatus) { + this.jobStatus = jobStatus; } - return false; - } - /** - * Have job hops changed. - * - * @return true, if successful - */ - public boolean haveJobHopsChanged() { - if ( changedHops ) { - return true; + /** + * Find a jobentry with a certain ID in a list of job entries. + * + * @param jobentries The List of jobentries + * @param id_jobentry The id of the jobentry + * @return The JobEntry object if one was found, null otherwise. + */ + public static final JobEntryInterface findJobEntry(List jobentries, ObjectId id_jobentry) { + if (jobentries == null) { + return null; + } + + for (JobEntryInterface je : jobentries) { + if (je.getObjectId() != null && je.getObjectId().equals(id_jobentry)) { + return je; + } + } + return null; } - for ( JobHopMeta hi : jobhops ) { - // Look at all the hops + /** + * Find a jobentrycopy with a certain ID in a list of job entry copies. + * + * @param jobcopies The List of jobentry copies + * @param id_jobentry_copy The id of the jobentry copy + * @return The JobEntryCopy object if one was found, null otherwise. + */ + public static final JobEntryCopy findJobEntryCopy(List jobcopies, ObjectId id_jobentry_copy) { + if (jobcopies == null) { + return null; + } - if ( hi.hasChanged() ) { - return true; - } + for (JobEntryCopy jec : jobcopies) { + if (jec.getObjectId() != null && jec.getObjectId().equals(id_jobentry_copy)) { + return jec; + } + } + return null; } - return false; - } - /** - * Gets the version of the job. - * - * @return The version of the job - */ - public String getJobversion() { - return jobVersion; - } + /** + * This method sets various internal kettle variables that can be used by the transformation. + */ + @Override + public void setInternalKettleVariables(VariableSpace var) { + setInternalFilenameKettleVariables(var); + setInternalNameKettleVariable(var); - /** - * Gets the status of the job. - * - * @return the status of the job - */ - public int getJobstatus() { - return jobStatus; - } + // The name of the directory in the repository + variables + .setVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, directory != null ? directory.getPath() : ""); - /** - * Set the version of the job. - * - * @param jobVersion The new version description of the job - */ - public void setJobversion( String jobVersion ) { - this.jobVersion = jobVersion; - } + boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null; - /** - * Set the status of the job. - * - * @param jobStatus The new status description of the job - */ - public void setJobstatus( int jobStatus ) { - this.jobStatus = jobStatus; - } + // setup fallbacks + if (hasRepoDir) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, + variables.getVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY)); + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, + variables.getVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY)); + } - /** - * Find a jobentry with a certain ID in a list of job entries. - * - * @param jobentries The List of jobentries - * @param id_jobentry The id of the jobentry - * @return The JobEntry object if one was found, null otherwise. 
- */ - public static final JobEntryInterface findJobEntry( List jobentries, ObjectId id_jobentry ) { - if ( jobentries == null ) { - return null; + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + repository != null ? Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY + : Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY)); } - for ( JobEntryInterface je : jobentries ) { - if ( je.getObjectId() != null && je.getObjectId().equals( id_jobentry ) ) { - return je; - } + /** + * Sets the internal name kettle variable. + * + * @param var the new internal name kettle variable + */ + @Override + protected void setInternalNameKettleVariable(VariableSpace var) { + // The name of the job + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_NAME, Const.NVL(name, "")); } - return null; - } - /** - * Find a jobentrycopy with a certain ID in a list of job entry copies. - * - * @param jobcopies The List of jobentry copies - * @param id_jobentry_copy The id of the jobentry copy - * @return The JobEntryCopy object if one was found, null otherwise. - */ - public static final JobEntryCopy findJobEntryCopy( List jobcopies, ObjectId id_jobentry_copy ) { - if ( jobcopies == null ) { - return null; + /** + * Sets the internal filename kettle variables. + * + * @param var the new internal filename kettle variables + */ + @Override + protected void setInternalFilenameKettleVariables(VariableSpace var) { + if (filename != null) { + // we have a filename that's defined. + try { + FileObject fileObject = KettleVFS.getFileObject(filename, var); + FileName fileName = fileObject.getName(); + + // The filename of the job + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, fileName.getBaseName()); + + // The directory of the job + FileName fileDir = fileName.getParent(); + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, fileDir.getURI()); + } catch (Exception e) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, ""); + } + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, ""); + } } - for ( JobEntryCopy jec : jobcopies ) { - if ( jec.getObjectId() != null && jec.getObjectId().equals( id_jobentry_copy ) ) { - return jec; - } + @Deprecated + public void checkJobEntries(List remarks, boolean only_selected, + ProgressMonitorListener monitor) { + checkJobEntries(remarks, only_selected, monitor, this, null, null); } - return null; - } - /** - * This method sets various internal kettle variables that can be used by the transformation. - */ - @Override - public void setInternalKettleVariables( VariableSpace var ) { - setInternalFilenameKettleVariables( var ); - setInternalNameKettleVariable( var ); + /** + * Check all job entries within the job. Each Job Entry has the opportunity to check their own settings. 
+ * + * @param remarks List of CheckResult remarks inserted into by each JobEntry + * @param only_selected true if you only want to check the selected jobs + * @param monitor Progress monitor (not presently in use) + */ + public void checkJobEntries(List remarks, boolean only_selected, + ProgressMonitorListener monitor, VariableSpace space, Repository repository, IMetaStore metaStore) { + remarks.clear(); // Empty remarks + if (monitor != null) { + monitor.beginTask(BaseMessages.getString(PKG, "JobMeta.Monitor.VerifyingThisJobEntryTask.Title"), + jobcopies.size() + 2); + } + boolean stop_checking = false; + for (int i = 0; i < jobcopies.size() && !stop_checking; i++) { + JobEntryCopy copy = jobcopies.get(i); // get the job entry copy + if ((!only_selected) || (only_selected && copy.isSelected())) { + JobEntryInterface entry = copy.getEntry(); + if (entry != null) { + if (monitor != null) { + monitor + .subTask(BaseMessages.getString(PKG, "JobMeta.Monitor.VerifyingJobEntry.Title", entry.getName())); + } + entry.check(remarks, this, space, repository, metaStore); + compatibleEntryCheck(entry, remarks); + if (monitor != null) { + monitor.worked(1); // progress bar... + if (monitor.isCanceled()) { + stop_checking = true; + } + } + } + } + if (monitor != null) { + monitor.worked(1); + } + } + if (monitor != null) { + monitor.done(); + } + } - // The name of the directory in the repository - variables - .setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, directory != null ? directory.getPath() : "" ); + @SuppressWarnings("deprecation") + private void compatibleEntryCheck(JobEntryInterface entry, List remarks) { + entry.check(remarks, this); + } - boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null; + /** + * Gets the resource dependencies. + * + * @return the resource dependencies + */ + public List getResourceDependencies() { + List resourceReferences = new ArrayList(); + JobEntryCopy copy = null; + JobEntryInterface entry = null; + for (int i = 0; i < jobcopies.size(); i++) { + copy = jobcopies.get(i); // get the job entry copy + entry = copy.getEntry(); + resourceReferences.addAll(entry.getResourceDependencies(this)); + } - // setup fallbacks - if ( hasRepoDir ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, - variables.getVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) ); - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, - variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); + return resourceReferences; } - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( - repository != null ? Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY - : Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) ); - } - - /** - * Sets the internal name kettle variable. - * - * @param var the new internal name kettle variable - */ - @Override - protected void setInternalNameKettleVariable( VariableSpace var ) { - // The name of the job - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, Const.NVL( name, "" ) ); - } - - /** - * Sets the internal filename kettle variables. - * - * @param var the new internal filename kettle variables - */ - @Override - protected void setInternalFilenameKettleVariables( VariableSpace var ) { - if ( filename != null ) { - // we have a filename that's defined. 
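A hedged usage sketch for the six-argument checkJobEntries(...) shown above: it collects the CheckResultInterface remarks that each job entry reports about its own settings. The job file name is a placeholder; the progress monitor is left null, which the code above explicitly guards for, and the repository and metastore are also passed as null here since no repository is attached in this sketch.

    import java.util.ArrayList;
    import java.util.List;

    import org.pentaho.di.core.CheckResultInterface;
    import org.pentaho.di.core.KettleEnvironment;
    import org.pentaho.di.job.JobMeta;

    public class CheckJobEntriesDemo {
        public static void main(String[] args) throws Exception {
            KettleEnvironment.init();
            JobMeta jobMeta = new JobMeta("my_job.kjb", null);   // placeholder path, no repository
            List<CheckResultInterface> remarks = new ArrayList<CheckResultInterface>();
            // only_selected = false -> verify every entry; JobMeta itself serves as the VariableSpace
            jobMeta.checkJobEntries(remarks, false, null, jobMeta, null, null);
            for (CheckResultInterface remark : remarks) {
                System.out.println(remark.getType() + ": " + remark.getText());
            }
        }
    }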
- try { - FileObject fileObject = KettleVFS.getFileObject( filename, var ); - FileName fileName = fileObject.getName(); - - // The filename of the job - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, fileName.getBaseName() ); - - // The directory of the job - FileName fileDir = fileName.getParent(); - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, fileDir.getURI() ); - } catch ( Exception e ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); - } - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "" ); - } - } - - @Deprecated - public void checkJobEntries( List remarks, boolean only_selected, - ProgressMonitorListener monitor ) { - checkJobEntries( remarks, only_selected, monitor, this, null, null ); - } - - /** - * Check all job entries within the job. Each Job Entry has the opportunity to check their own settings. - * - * @param remarks List of CheckResult remarks inserted into by each JobEntry - * @param only_selected true if you only want to check the selected jobs - * @param monitor Progress monitor (not presently in use) - */ - public void checkJobEntries( List remarks, boolean only_selected, - ProgressMonitorListener monitor, VariableSpace space, Repository repository, IMetaStore metaStore ) { - remarks.clear(); // Empty remarks - if ( monitor != null ) { - monitor.beginTask( BaseMessages.getString( PKG, "JobMeta.Monitor.VerifyingThisJobEntryTask.Title" ), - jobcopies.size() + 2 ); - } - boolean stop_checking = false; - for ( int i = 0; i < jobcopies.size() && !stop_checking; i++ ) { - JobEntryCopy copy = jobcopies.get( i ); // get the job entry copy - if ( ( !only_selected ) || ( only_selected && copy.isSelected() ) ) { - JobEntryInterface entry = copy.getEntry(); - if ( entry != null ) { - if ( monitor != null ) { - monitor - .subTask( BaseMessages.getString( PKG, "JobMeta.Monitor.VerifyingJobEntry.Title", entry.getName() ) ); - } - entry.check( remarks, this, space, repository, metaStore ); - compatibleEntryCheck( entry, remarks ); - if ( monitor != null ) { - monitor.worked( 1 ); // progress bar... - if ( monitor.isCanceled() ) { - stop_checking = true; - } - } - } - } - if ( monitor != null ) { - monitor.worked( 1 ); - } - } - if ( monitor != null ) { - monitor.done(); - } - } - - @SuppressWarnings( "deprecation" ) - private void compatibleEntryCheck( JobEntryInterface entry, List remarks ) { - entry.check( remarks, this ); - } - - /** - * Gets the resource dependencies. - * - * @return the resource dependencies - */ - public List getResourceDependencies() { - List resourceReferences = new ArrayList(); - JobEntryCopy copy = null; - JobEntryInterface entry = null; - for ( int i = 0; i < jobcopies.size(); i++ ) { - copy = jobcopies.get( i ); // get the job entry copy - entry = copy.getEntry(); - resourceReferences.addAll( entry.getResourceDependencies( this ) ); - } - - return resourceReferences; - } - - public String exportResources( VariableSpace space, Map definitions, - ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { - String resourceName = null; - try { - // Handle naming for both repository and XML bases resources... 
- // - String baseName; - String originalPath; - String fullname; - String extension = "kjb"; - if ( Const.isEmpty( getFilename() ) ) { - // Assume repository... - // - originalPath = directory.getPath(); - baseName = getName(); - fullname = - directory.getPath() + ( directory.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ? "" - : RepositoryDirectory.DIRECTORY_SEPARATOR ) + getName() + "." + extension; // - } else { - // Assume file - // - FileObject fileObject = KettleVFS.getFileObject( space.environmentSubstitute( getFilename() ), space ); - originalPath = fileObject.getParent().getName().getPath(); - baseName = fileObject.getName().getBaseName(); - fullname = fileObject.getName().getPath(); - } - - resourceName = namingInterface - .nameResource( baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.JOB ); - ResourceDefinition definition = definitions.get( resourceName ); - if ( definition == null ) { - // If we do this once, it will be plenty :-) - // - JobMeta jobMeta = (JobMeta) this.realClone( false ); + public String exportResources(VariableSpace space, Map definitions, + ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore) throws KettleException { + String resourceName = null; + try { + // Handle naming for both repository and XML bases resources... + // + String baseName; + String originalPath; + String fullname; + String extension = "kjb"; + if (Const.isEmpty(getFilename())) { + // Assume repository... + // + originalPath = directory.getPath(); + baseName = getName(); + fullname = + directory.getPath() + (directory.getPath().endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR) ? "" + : RepositoryDirectory.DIRECTORY_SEPARATOR) + getName() + "." + extension; // + } else { + // Assume file + // + FileObject fileObject = KettleVFS.getFileObject(space.environmentSubstitute(getFilename()), space); + originalPath = fileObject.getParent().getName().getPath(); + baseName = fileObject.getName().getBaseName(); + fullname = fileObject.getName().getPath(); + } - // All objects get re-located to the root folder, - // but, when exporting, we need to see current directory - // in order to make 'Internal.Entry.Current.Directory' variable work - jobMeta.setRepositoryDirectory( directory ); + resourceName = namingInterface + .nameResource(baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.JOB); + ResourceDefinition definition = definitions.get(resourceName); + if (definition == null) { + // If we do this once, it will be plenty :-) + // + JobMeta jobMeta = (JobMeta) this.realClone(false); + + // All objects get re-located to the root folder, + // but, when exporting, we need to see current directory + // in order to make 'Internal.Entry.Current.Directory' variable work + jobMeta.setRepositoryDirectory(directory); + + // Add used resources, modify transMeta accordingly + // Go through the list of steps, etc. + // These critters change the steps in the cloned TransMeta + // At the end we make a new XML version of it in "exported" + // format... + + // loop over steps, databases will be exported to XML anyway. + // + for (JobEntryCopy jobEntry : jobMeta.jobcopies) { + compatibleJobEntryExportResources(jobEntry.getEntry(), jobMeta, definitions, namingInterface, repository); + jobEntry.getEntry().exportResources(jobMeta, definitions, namingInterface, repository, metaStore); + } + + // Set a number of parameters for all the data files referenced so far... 
+ // + Map directoryMap = namingInterface.getDirectoryMap(); + if (directoryMap != null) { + for (String directory : directoryMap.keySet()) { + String parameterName = directoryMap.get(directory); + jobMeta.addParameterDefinition(parameterName, directory, "Data file path discovered during export"); + } + } + + // At the end, add ourselves to the map... + // + String jobMetaContent = jobMeta.getXML(); + + definition = new ResourceDefinition(resourceName, jobMetaContent); + + // Also remember the original filename (if any), including variables etc. + // + if (Const.isEmpty(this.getFilename())) { // Repository + definition.setOrigin(fullname); + } else { + definition.setOrigin(this.getFilename()); + } + + definitions.put(fullname, definition); + } + } catch (FileSystemException e) { + throw new KettleException( + BaseMessages.getString(PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", getFilename()), e); + } catch (KettleFileException e) { + throw new KettleException( + BaseMessages.getString(PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", getFilename()), e); + } - // Add used resources, modify transMeta accordingly - // Go through the list of steps, etc. - // These critters change the steps in the cloned TransMeta - // At the end we make a new XML version of it in "exported" - // format... + return resourceName; + } - // loop over steps, databases will be exported to XML anyway. - // - for ( JobEntryCopy jobEntry : jobMeta.jobcopies ) { - compatibleJobEntryExportResources( jobEntry.getEntry(), jobMeta, definitions, namingInterface, repository ); - jobEntry.getEntry().exportResources( jobMeta, definitions, namingInterface, repository, metaStore ); - } + @SuppressWarnings("deprecation") + private void compatibleJobEntryExportResources(JobEntryInterface entry, JobMeta jobMeta, + Map definitions, ResourceNamingInterface namingInterface, Repository repository2) + throws KettleException { + entry.exportResources(jobMeta, definitions, namingInterface, repository); + } - // Set a number of parameters for all the data files referenced so far... + /** + * See if the name of the supplied job entry copy doesn't collide with any other job entry copy in the job. + * + * @param je The job entry copy to verify the name for. + */ + public void renameJobEntryIfNameCollides(JobEntryCopy je) { + // First see if the name changed. + // If so, we need to verify that the name is not already used in the + // job. // - Map directoryMap = namingInterface.getDirectoryMap(); - if ( directoryMap != null ) { - for ( String directory : directoryMap.keySet() ) { - String parameterName = directoryMap.get( directory ); - jobMeta.addParameterDefinition( parameterName, directory, "Data file path discovered during export" ); - } - } + String newname = je.getName(); - // At the end, add ourselves to the map... + // See if this name exists in the other job entries // - String jobMetaContent = jobMeta.getXML(); - - definition = new ResourceDefinition( resourceName, jobMetaContent ); + boolean found; + int nr = 1; + do { + found = false; + for (JobEntryCopy copy : jobcopies) { + if (copy != je && copy.getName().equalsIgnoreCase(newname) && copy.getNr() == 0) { + found = true; + } + } + if (found) { + nr++; + newname = je.getName() + " (" + nr + ")"; + } + } while (found); - // Also remember the original filename (if any), including variables etc. + // Rename if required. 
// - if ( Const.isEmpty( this.getFilename() ) ) { // Repository - definition.setOrigin( fullname ); - } else { - definition.setOrigin( this.getFilename() ); - } - - definitions.put( fullname, definition ); - } - } catch ( FileSystemException e ) { - throw new KettleException( - BaseMessages.getString( PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", getFilename() ), e ); - } catch ( KettleFileException e ) { - throw new KettleException( - BaseMessages.getString( PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", getFilename() ), e ); + je.setName(newname); } - return resourceName; - } - - @SuppressWarnings( "deprecation" ) - private void compatibleJobEntryExportResources( JobEntryInterface entry, JobMeta jobMeta, - Map definitions, ResourceNamingInterface namingInterface, Repository repository2 ) - throws KettleException { - entry.exportResources( jobMeta, definitions, namingInterface, repository ); - } - - /** - * See if the name of the supplied job entry copy doesn't collide with any other job entry copy in the job. - * - * @param je The job entry copy to verify the name for. - */ - public void renameJobEntryIfNameCollides( JobEntryCopy je ) { - // First see if the name changed. - // If so, we need to verify that the name is not already used in the - // job. - // - String newname = je.getName(); - - // See if this name exists in the other job entries - // - boolean found; - int nr = 1; - do { - found = false; - for ( JobEntryCopy copy : jobcopies ) { - if ( copy != je && copy.getName().equalsIgnoreCase( newname ) && copy.getNr() == 0 ) { - found = true; - } - } - if ( found ) { - nr++; - newname = je.getName() + " (" + nr + ")"; - } - } while ( found ); - - // Rename if required. - // - je.setName( newname ); - } - - /** - * Gets the job copies. - * - * @return the job copies - */ - public List getJobCopies() { - return jobcopies; - } + /** + * Gets the job copies. + * + * @return the job copies + */ + public List getJobCopies() { + return jobcopies; + } - /** - * Gets the jobhops. - * - * @return the jobhops - */ - public List getJobhops() { - return jobhops; - } + /** + * Gets the jobhops. 
+ * + * @return the jobhops + */ + public List getJobhops() { + return jobhops; + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() */ - public RepositoryObjectType getRepositoryElementType() { - return REPOSITORY_ELEMENT_TYPE; - } + public RepositoryObjectType getRepositoryElementType() { + return REPOSITORY_ELEMENT_TYPE; + } - /** - * Create a unique list of job entry interfaces - * - * @return - */ - public List composeJobEntryInterfaceList() { - List list = new ArrayList(); + /** + * Create a unique list of job entry interfaces + * + * @return + */ + public List composeJobEntryInterfaceList() { + List list = new ArrayList(); - for ( JobEntryCopy copy : jobcopies ) { - if ( !list.contains( copy.getEntry() ) ) { - list.add( copy.getEntry() ); - } - } + for (JobEntryCopy copy : jobcopies) { + if (!list.contains(copy.getEntry())) { + list.add(copy.getEntry()); + } + } - return list; - } + return list; + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() */ - public String getLogChannelId() { - return null; - } + public String getLogChannelId() { + return null; + } - /* + /* * (non-Javadoc) * * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() */ - public LoggingObjectType getObjectType() { - return LoggingObjectType.JOBMETA; - } + public LoggingObjectType getObjectType() { + return LoggingObjectType.JOBMETA; + } - /** - * Gets the job entry log table. - * - * @return the jobEntryLogTable - */ - public JobEntryLogTable getJobEntryLogTable() { - return jobEntryLogTable; - } + /** + * Gets the job entry log table. + * + * @return the jobEntryLogTable + */ + public JobEntryLogTable getJobEntryLogTable() { + return jobEntryLogTable; + } - /** - * Sets the job entry log table. - * - * @param jobEntryLogTable the jobEntryLogTable to set - */ - public void setJobEntryLogTable( JobEntryLogTable jobEntryLogTable ) { - this.jobEntryLogTable = jobEntryLogTable; - } + /** + * Sets the job entry log table. + * + * @param jobEntryLogTable the jobEntryLogTable to set + */ + public void setJobEntryLogTable(JobEntryLogTable jobEntryLogTable) { + this.jobEntryLogTable = jobEntryLogTable; + } - /** - * Gets the log tables. - * - * @return the log tables - */ - public List getLogTables() { - List logTables = new ArrayList(); - logTables.add( jobLogTable ); - logTables.add( jobEntryLogTable ); - logTables.add( channelLogTable ); - logTables.addAll( extraLogTables ); - return logTables; - } - - /** - * Checks whether the job has repository references. - * - * @return true if the job has repository references, false otherwise - */ - public boolean hasRepositoryReferences() { - for ( JobEntryCopy copy : jobcopies ) { - if ( copy.getEntry().hasRepositoryReferences() ) { - return true; - } + /** + * Gets the log tables. + * + * @return the log tables + */ + public List getLogTables() { + List logTables = new ArrayList(); + logTables.add(jobLogTable); + logTables.add(jobEntryLogTable); + logTables.add(channelLogTable); + logTables.addAll(extraLogTables); + return logTables; + } + + /** + * Checks whether the job has repository references. 
+ * + * @return true if the job has repository references, false otherwise + */ + public boolean hasRepositoryReferences() { + for (JobEntryCopy copy : jobcopies) { + if (copy.getEntry().hasRepositoryReferences()) { + return true; + } + } + return false; } - return false; - } - /** - * Look up the references after import - * - * @param repository the repository to reference. - */ - public void lookupRepositoryReferences( Repository repository ) throws KettleException { - KettleException lastThrownException = null; - Map notFoundedReferences = new HashMap<>(); - for ( JobEntryCopy copy : jobcopies ) { - if ( copy.getEntry().hasRepositoryReferences() ) { - try { - copy.getEntry().lookupRepositoryReferences( repository ); - } catch ( IdNotFoundException e ) { - lastThrownException = e; - String path = e.getPathToObject(); - String name = e.getObjectName(); - String key = StringUtils.isEmpty( path ) || path.equals( "null" ) ? name : path + "/" + name; - notFoundedReferences.put( key, e.getObjectType() ); + /** + * Look up the references after import + * + * @param repository the repository to reference. + */ + public void lookupRepositoryReferences(Repository repository) throws KettleException { + KettleException lastThrownException = null; + Map notFoundedReferences = new HashMap<>(); + for (JobEntryCopy copy : jobcopies) { + if (copy.getEntry().hasRepositoryReferences()) { + try { + copy.getEntry().lookupRepositoryReferences(repository); + } catch (IdNotFoundException e) { + lastThrownException = e; + String path = e.getPathToObject(); + String name = e.getObjectName(); + String key = StringUtils.isEmpty(path) || path.equals("null") ? name : path + "/" + name; + notFoundedReferences.put(key, e.getObjectType()); + } + } + } + if (lastThrownException != null && !notFoundedReferences.isEmpty()) { + throw new LookupReferencesException(lastThrownException, notFoundedReferences); } - } } - if ( lastThrownException != null && !notFoundedReferences.isEmpty() ) { - throw new LookupReferencesException( lastThrownException, notFoundedReferences ); + + /** + * Returns whether or not the job is gathering metrics. For a JobMeta this is always false. + * + * @return is gathering metrics = false; + */ + @Override + public boolean isGatheringMetrics() { + return false; } - } - /** - * Returns whether or not the job is gathering metrics. For a JobMeta this is always false. - * - * @return is gathering metrics = false; - */ - @Override - public boolean isGatheringMetrics() { - return false; - } + /** + * Sets whether or not the job is gathering metrics. This is a stub with not executable code. + */ + @Override + public void setGatheringMetrics(boolean gatheringMetrics) { + } - /** - * Sets whether or not the job is gathering metrics. This is a stub with not executable code. - */ - @Override - public void setGatheringMetrics( boolean gatheringMetrics ) { - } - - @Override - public boolean isForcingSeparateLogging() { - return false; - } - - @Override - public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { - } - - /** - * This method needs to be called to store those objects which are used and referenced in the job metadata but not - * saved in the serialization. - * - * @param metaStore The store to save to - * @throws MetaStoreException in case there is an error. 
- */ - public void saveMetaStoreObjects( Repository repository, IMetaStore metaStore ) throws MetaStoreException { - } + @Override + public boolean isForcingSeparateLogging() { + return false; + } + + @Override + public void setForcingSeparateLogging(boolean forcingSeparateLogging) { + } + + /** + * This method needs to be called to store those objects which are used and referenced in the job metadata but not + * saved in the serialization. + * + * @param metaStore The store to save to + * @throws MetaStoreException in case there is an error. + */ + public void saveMetaStoreObjects(Repository repository, IMetaStore metaStore) throws MetaStoreException { + } - public List getExtraLogTables() { - return extraLogTables; - } + public List getExtraLogTables() { + return extraLogTables; + } - public void setExtraLogTables( List extraLogTables ) { - this.extraLogTables = extraLogTables; - } + public void setExtraLogTables(List extraLogTables) { + this.extraLogTables = extraLogTables; + } - public boolean containsJobCopy( JobEntryCopy jobCopy ) { - return jobcopies.contains( jobCopy ); - } + public boolean containsJobCopy(JobEntryCopy jobCopy) { + return jobcopies.contains(jobCopy); + } - public List getMissingEntries() { - return missingEntries; - } + public List getMissingEntries() { + return missingEntries; + } - public void addMissingEntry( MissingEntry missingEntry ) { - if ( missingEntries == null ) { - missingEntries = new ArrayList(); + public void addMissingEntry(MissingEntry missingEntry) { + if (missingEntries == null) { + missingEntries = new ArrayList(); + } + missingEntries.add(missingEntry); } - missingEntries.add( missingEntry ); - } - public void removeMissingEntry( MissingEntry missingEntry ) { - if ( missingEntries != null && missingEntry != null && missingEntries.contains( missingEntry ) ) { - missingEntries.remove( missingEntry ); + public void removeMissingEntry(MissingEntry missingEntry) { + if (missingEntries != null && missingEntry != null && missingEntries.contains(missingEntry)) { + missingEntries.remove(missingEntry); + } } - } - public boolean hasMissingPlugins() { - return missingEntries != null && !missingEntries.isEmpty(); - } + public boolean hasMissingPlugins() { + return missingEntries != null && !missingEntries.isEmpty(); + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java index b19920a..e26537c 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java @@ -22,22 +22,9 @@ package org.pentaho.di.job.entries.job; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.Map; -import java.util.UUID; - import org.apache.commons.vfs2.FileObject; import org.pentaho.di.cluster.SlaveServer; -import org.pentaho.di.core.CheckResultInterface; -import org.pentaho.di.core.Const; -import org.pentaho.di.core.ObjectLocationSpecificationMethod; -import org.pentaho.di.core.Result; -import org.pentaho.di.core.ResultFile; -import org.pentaho.di.core.RowMetaAndData; -import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.*; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleDatabaseException; import org.pentaho.di.core.exception.KettleException; @@ -61,14 +48,7 @@ import org.pentaho.di.job.entry.JobEntryInterface; 
import org.pentaho.di.job.entry.validator.AndValidator; import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils; -import org.pentaho.di.repository.ObjectId; -import org.pentaho.di.repository.Repository; -import org.pentaho.di.repository.RepositoryDirectory; -import org.pentaho.di.repository.RepositoryDirectoryInterface; -import org.pentaho.di.repository.RepositoryImportLocation; -import org.pentaho.di.repository.RepositoryObject; -import org.pentaho.di.repository.RepositoryObjectType; -import org.pentaho.di.repository.StringObjectId; +import org.pentaho.di.repository.*; import org.pentaho.di.resource.ResourceDefinition; import org.pentaho.di.resource.ResourceEntry; import org.pentaho.di.resource.ResourceEntry.ResourceType; @@ -78,1490 +58,1470 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.text.SimpleDateFormat; +import java.util.*; + /** * Recursive definition of a Job. This step means that an entire Job has to be executed. It can be the same Job, but * just make sure that you don't get an endless loop. Provide an escape routine using JobEval. * * @author Matt * @since 01-10-2003, Rewritten on 18-06-2004 - * */ public class JobEntryJob extends JobEntryBase implements Cloneable, JobEntryInterface { - private static Class PKG = JobEntryJob.class; // for i18n purposes, needed by Translator2!! + private static Class PKG = JobEntryJob.class; // for i18n purposes, needed by Translator2!! - private String filename; - private String jobname; - private String directory; - private ObjectId jobObjectId; - private ObjectLocationSpecificationMethod specificationMethod; + private String filename; + private String jobname; + private String directory; + private ObjectId jobObjectId; + private ObjectLocationSpecificationMethod specificationMethod; - public String[] arguments; - public boolean argFromPrevious; - public boolean paramsFromPrevious; - public boolean execPerRow; + public String[] arguments; + public boolean argFromPrevious; + public boolean paramsFromPrevious; + public boolean execPerRow; - public String[] parameters; - public String[] parameterFieldNames; - public String[] parameterValues; + public String[] parameters; + public String[] parameterFieldNames; + public String[] parameterValues; - public boolean setLogfile; - public String logfile, logext; - public boolean addDate, addTime; - public LogLevel logFileLevel; + public boolean setLogfile; + public String logfile, logext; + public boolean addDate, addTime; + public LogLevel logFileLevel; - public boolean parallel; - private String directoryPath; - public boolean setAppendLogfile; - public boolean createParentFolder; + public boolean parallel; + private String directoryPath; + public boolean setAppendLogfile; + public boolean createParentFolder; - public boolean waitingToFinish = true; - public boolean followingAbortRemotely; + public boolean waitingToFinish = true; + public boolean followingAbortRemotely; - public boolean expandingRemoteJob; + public boolean expandingRemoteJob; - private String remoteSlaveServerName; - public boolean passingAllParameters = true; + private String remoteSlaveServerName; + public boolean passingAllParameters = true; - private boolean passingExport; + private boolean passingExport; - public static final LogLevel DEFAULT_LOG_LEVEL = LogLevel.NOTHING; + public static final LogLevel DEFAULT_LOG_LEVEL = LogLevel.NOTHING; - private Job job; + private Job job; - public JobEntryJob( String name ) { - super( name, "" ); - } - - public JobEntryJob() { - this( "" ); - 
clear(); - } - - private void allocateArgs( int nrArgs ) { - arguments = new String[nrArgs]; - } - - private void allocateParams( int nrParameters ) { - parameters = new String[nrParameters]; - parameterFieldNames = new String[nrParameters]; - parameterValues = new String[nrParameters]; - } - - public Object clone() { - JobEntryJob je = (JobEntryJob) super.clone(); - if ( arguments != null ) { - int nrArgs = arguments.length; - je.allocateArgs( nrArgs ); - System.arraycopy( arguments, 0, je.arguments, 0, nrArgs ); - } - if ( parameters != null ) { - int nrParameters = parameters.length; - je.allocateParams( nrParameters ); - System.arraycopy( parameters, 0, je.parameters, 0, nrParameters ); - System.arraycopy( parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters ); - System.arraycopy( parameterValues, 0, je.parameterValues, 0, nrParameters ); + public JobEntryJob(String name) { + super(name, ""); } - return je; - } - - public void setFileName( String n ) { - filename = n; - } - - /** - * @deprecated use getFilename() instead. - * @return the filename - */ - @Deprecated - public String getFileName() { - return filename; - } - - public String getFilename() { - return filename; - } - - public String getRealFilename() { - return environmentSubstitute( getFilename() ); - } - - public void setJobName( String jobname ) { - this.jobname = jobname; - } - - public String getJobName() { - return jobname; - } - - public String getDirectory() { - return directory; - } - - public void setDirectory( String directory ) { - this.directory = directory; - } - - public boolean isPassingExport() { - return passingExport; - } - - public void setPassingExport( boolean passingExport ) { - this.passingExport = passingExport; - } - - public String getLogFilename() { - String retval = ""; - if ( setLogfile ) { - retval += logfile == null ? "" : logfile; - Calendar cal = Calendar.getInstance(); - if ( addDate ) { - SimpleDateFormat sdf = new SimpleDateFormat( "yyyyMMdd" ); - retval += "_" + sdf.format( cal.getTime() ); - } - if ( addTime ) { - SimpleDateFormat sdf = new SimpleDateFormat( "HHmmss" ); - retval += "_" + sdf.format( cal.getTime() ); - } - if ( logext != null && logext.length() > 0 ) { - retval += "." + logext; - } + + public JobEntryJob() { + this(""); + clear(); } - return retval; - } - public String getXML() { - StringBuffer retval = new StringBuffer( 200 ); + private void allocateArgs(int nrArgs) { + arguments = new String[nrArgs]; + } - retval.append( super.getXML() ); + private void allocateParams(int nrParameters) { + parameters = new String[nrParameters]; + parameterFieldNames = new String[nrParameters]; + parameterValues = new String[nrParameters]; + } - // specificationMethod - // - retval.append( " " ).append( - XMLHandler.addTagValue( "specification_method", specificationMethod == null ? null : specificationMethod - .getCode() ) ); - retval.append( " " ).append( - XMLHandler.addTagValue( "job_object_id", jobObjectId == null ? null : jobObjectId.toString() ) ); - // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same - // repository. 
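As a side note on this class (a job entry that runs another job), the following hedged sketch populates the public fields and setters declared in the hunk above. The sub-job path and parameter names are placeholders, and ${Internal.Entry.Current.Directory} is the internal variable maintained by the JobMeta changes earlier in this patch; none of these values are taken from the patch itself.

    import org.pentaho.di.job.entries.job.JobEntryJob;

    public class SubJobEntrySetup {
        public static JobEntryJob newSubJobEntry() {
            JobEntryJob entry = new JobEntryJob("Run sub-job");                   // arbitrary entry name
            entry.setFileName("${Internal.Entry.Current.Directory}/sub_job.kjb"); // placeholder .kjb path
            entry.parameters = new String[] { "TARGET_DATE" };                    // parameter handed to the sub-job
            entry.parameterFieldNames = new String[] { "" };                      // not read from a stream field
            entry.parameterValues = new String[] { "${TARGET_DATE}" };            // forwarded from the parent job
            entry.passingAllParameters = true;  // also pass parent parameters that are not listed
            entry.waitingToFinish = true;       // block until the sub-job completes
            entry.execPerRow = false;           // run once, not once per incoming row
            return entry;
        }
    }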
- // - if ( rep != null && jobObjectId != null ) { - try { - RepositoryObject objectInformation = rep.getObjectInformation( jobObjectId, RepositoryObjectType.JOB ); - if ( objectInformation != null ) { - jobname = objectInformation.getName(); - directory = objectInformation.getRepositoryDirectory().getPath(); + public Object clone() { + JobEntryJob je = (JobEntryJob) super.clone(); + if (arguments != null) { + int nrArgs = arguments.length; + je.allocateArgs(nrArgs); + System.arraycopy(arguments, 0, je.arguments, 0, nrArgs); } - } catch ( KettleException e ) { - // Ignore object reference problems. It simply means that the reference is no longer valid. - } + if (parameters != null) { + int nrParameters = parameters.length; + je.allocateParams(nrParameters); + System.arraycopy(parameters, 0, je.parameters, 0, nrParameters); + System.arraycopy(parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters); + System.arraycopy(parameterValues, 0, je.parameterValues, 0, nrParameters); + } + return je; } - retval.append( " " ).append( XMLHandler.addTagValue( "filename", filename ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "jobname", jobname ) ); - if ( directory != null ) { - retval.append( " " ).append( XMLHandler.addTagValue( "directory", directory ) ); - } else if ( directoryPath != null ) { - retval.append( " " ).append( XMLHandler.addTagValue( "directory", directoryPath ) ); - } - retval.append( " " ).append( XMLHandler.addTagValue( "arg_from_previous", argFromPrevious ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "params_from_previous", paramsFromPrevious ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "exec_per_row", execPerRow ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "set_logfile", setLogfile ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "logfile", logfile ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "logext", logext ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "add_date", addDate ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "add_time", addTime ) ); - retval.append( " " ).append( - XMLHandler.addTagValue( "loglevel", logFileLevel != null ? logFileLevel.getCode() : DEFAULT_LOG_LEVEL - .getCode() ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "slave_server_name", remoteSlaveServerName ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "wait_until_finished", waitingToFinish ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "follow_abort_remote", followingAbortRemotely ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "expand_remote_job", expandingRemoteJob ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "create_parent_folder", createParentFolder ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "pass_export", passingExport ) ); - - if ( arguments != null ) { - for ( int i = 0; i < arguments.length; i++ ) { - // This is a very very bad way of making an XML file, don't use it (or - // copy it). Sven Boden - retval.append( " " ).append( XMLHandler.addTagValue( "argument" + i, arguments[i] ) ); - } + public void setFileName(String n) { + filename = n; } - if ( parameters != null ) { - retval.append( " " ).append( XMLHandler.openTag( "parameters" ) ); - - retval.append( " " ).append( XMLHandler.addTagValue( "pass_all_parameters", passingAllParameters ) ); - - for ( int i = 0; i < parameters.length; i++ ) { - // This is a better way of making the XML file than the arguments. 
- retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ); - - retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[i] ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "stream_name", parameterFieldNames[i] ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "value", parameterValues[i] ) ); - - retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ); - } - retval.append( " " ).append( XMLHandler.closeTag( "parameters" ) ); + /** + * @return the filename + * @deprecated use getFilename() instead. + */ + @Deprecated + public String getFileName() { + return filename; } - retval.append( " " ).append( XMLHandler.addTagValue( "set_append_logfile", setAppendLogfile ) ); - return retval.toString(); - } + public String getFilename() { + return filename; + } - private void checkObjectLocationSpecificationMethod() { - if ( specificationMethod == null ) { - // Backward compatibility - // - // Default = Filename - // - specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + public String getRealFilename() { + return environmentSubstitute(getFilename()); + } - if ( !Const.isEmpty( filename ) ) { - specificationMethod = ObjectLocationSpecificationMethod.FILENAME; - } else if ( jobObjectId != null ) { - specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; - } else if ( !Const.isEmpty( jobname ) ) { - specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; - } + public void setJobName(String jobname) { + this.jobname = jobname; } - } - - public void loadXML( Node entrynode, List databases, List slaveServers, - Repository rep, IMetaStore metaStore ) throws KettleXMLException { - try { - super.loadXML( entrynode, databases, slaveServers ); - - String method = XMLHandler.getTagValue( entrynode, "specification_method" ); - specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); - - String jobId = XMLHandler.getTagValue( entrynode, "job_object_id" ); - jobObjectId = Const.isEmpty( jobId ) ? 
null : new StringObjectId( jobId ); - filename = XMLHandler.getTagValue( entrynode, "filename" ); - jobname = XMLHandler.getTagValue( entrynode, "jobname" ); - - if ( rep != null && rep.isConnected() && !Const.isEmpty( jobname ) ) { - specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; - } - - // Backward compatibility check for object specification - // - checkObjectLocationSpecificationMethod(); - - argFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "arg_from_previous" ) ); - paramsFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "params_from_previous" ) ); - execPerRow = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "exec_per_row" ) ); - setLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_logfile" ) ); - addDate = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_date" ) ); - addTime = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_time" ) ); - logfile = XMLHandler.getTagValue( entrynode, "logfile" ); - logext = XMLHandler.getTagValue( entrynode, "logext" ); - logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) ); - setAppendLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_append_logfile" ) ); - remoteSlaveServerName = XMLHandler.getTagValue( entrynode, "slave_server_name" ); - passingExport = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "pass_export" ) ); - directory = XMLHandler.getTagValue( entrynode, "directory" ); - createParentFolder = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "create_parent_folder" ) ); - - String wait = XMLHandler.getTagValue( entrynode, "wait_until_finished" ); - if ( Const.isEmpty( wait ) ) { - waitingToFinish = true; - } else { - waitingToFinish = "Y".equalsIgnoreCase( wait ); - } - - followingAbortRemotely = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "follow_abort_remote" ) ); - expandingRemoteJob = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "expand_remote_job" ) ); - - // How many arguments? - int argnr = 0; - while ( XMLHandler.getTagValue( entrynode, "argument" + argnr ) != null ) { - argnr++; - } - allocateArgs( argnr ); - - // Read them all... This is a very BAD way to do it by the way. Sven - // Boden. 
- for ( int a = 0; a < argnr; a++ ) { - arguments[a] = XMLHandler.getTagValue( entrynode, "argument" + a ); - } - - Node parametersNode = XMLHandler.getSubNode( entrynode, "parameters" ); - - String passAll = XMLHandler.getTagValue( parametersNode, "pass_all_parameters" ); - passingAllParameters = Const.isEmpty( passAll ) || "Y".equalsIgnoreCase( passAll ); - - int nrParameters = XMLHandler.countNodes( parametersNode, "parameter" ); - allocateParams( nrParameters ); - - for ( int i = 0; i < nrParameters; i++ ) { - Node knode = XMLHandler.getSubNodeByNr( parametersNode, "parameter", i ); - - parameters[i] = XMLHandler.getTagValue( knode, "name" ); - parameterFieldNames[i] = XMLHandler.getTagValue( knode, "stream_name" ); - parameterValues[i] = XMLHandler.getTagValue( knode, "value" ); - } - } catch ( KettleXMLException xe ) { - throw new KettleXMLException( "Unable to load 'job' job entry from XML node", xe ); + + public String getJobName() { + return jobname; } - } - - /** - * Load the jobentry from repository - */ - public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List databases, - List slaveServers ) throws KettleException { - try { - String method = rep.getJobEntryAttributeString( id_jobentry, "specification_method" ); - specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); - String jobId = rep.getJobEntryAttributeString( id_jobentry, "job_object_id" ); - jobObjectId = Const.isEmpty( jobId ) ? null : new StringObjectId( jobId ); - jobname = rep.getJobEntryAttributeString( id_jobentry, "name" ); - directory = rep.getJobEntryAttributeString( id_jobentry, "dir_path" ); - filename = rep.getJobEntryAttributeString( id_jobentry, "file_name" ); - - // Backward compatibility check for object specification - // - checkObjectLocationSpecificationMethod(); - - argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" ); - paramsFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "params_from_previous" ); - execPerRow = rep.getJobEntryAttributeBoolean( id_jobentry, "exec_per_row" ); - setLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_logfile" ); - addDate = rep.getJobEntryAttributeBoolean( id_jobentry, "add_date" ); - addTime = rep.getJobEntryAttributeBoolean( id_jobentry, "add_time" ); - logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" ); - logext = rep.getJobEntryAttributeString( id_jobentry, "logext" ); - logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) ); - setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" ); - remoteSlaveServerName = rep.getJobEntryAttributeString( id_jobentry, "slave_server_name" ); - passingExport = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_export" ); - waitingToFinish = rep.getJobEntryAttributeBoolean( id_jobentry, "wait_until_finished", true ); - followingAbortRemotely = rep.getJobEntryAttributeBoolean( id_jobentry, "follow_abort_remote" ); - expandingRemoteJob = rep.getJobEntryAttributeBoolean( id_jobentry, "expand_remote_job" ); - createParentFolder = rep.getJobEntryAttributeBoolean( id_jobentry, "create_parent_folder" ); - - // How many arguments? - int argnr = rep.countNrJobEntryAttributes( id_jobentry, "argument" ); - allocateArgs( argnr ); - - // Read all arguments ... - for ( int a = 0; a < argnr; a++ ) { - arguments[a] = rep.getJobEntryAttributeString( id_jobentry, a, "argument" ); - } - - // How many arguments? 
- int parameternr = rep.countNrJobEntryAttributes( id_jobentry, "parameter_name" ); - allocateParams( parameternr ); - - // Read all parameters ... - for ( int a = 0; a < parameternr; a++ ) { - parameters[a] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_name" ); - parameterFieldNames[a] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_stream_name" ); - parameterValues[a] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_value" ); - } - - passingAllParameters = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_all_parameters", true ); - - } catch ( KettleDatabaseException dbe ) { - throw new KettleException( "Unable to load job entry of type 'job' from the repository with id_jobentry=" - + id_jobentry, dbe ); + + public String getDirectory() { + return directory; } - } - - // Save the attributes of this job entry - // - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException { - try { - rep.saveJobEntryAttribute( id_job, getObjectId(), "specification_method", specificationMethod == null - ? null : specificationMethod.getCode() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "job_object_id", jobObjectId == null ? null : jobObjectId - .toString() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "name", getJobName() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "dir_path", getDirectory() != null ? getDirectory() : "" ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "file_name", filename ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "arg_from_previous", argFromPrevious ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "params_from_previous", paramsFromPrevious ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "exec_per_row", execPerRow ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "set_logfile", setLogfile ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "add_date", addDate ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "add_time", addTime ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "logfile", logfile ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "logext", logext ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "set_append_logfile", setAppendLogfile ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "loglevel", logFileLevel != null - ? logFileLevel.getCode() : JobEntryJob.DEFAULT_LOG_LEVEL.getCode() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "slave_server_name", remoteSlaveServerName ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_export", passingExport ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "wait_until_finished", waitingToFinish ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "expand_remote_job", expandingRemoteJob ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "create_parent_folder", createParentFolder ); - - // save the arguments... - if ( arguments != null ) { - for ( int i = 0; i < arguments.length; i++ ) { - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "argument", arguments[i] ); - } - } - - // save the parameters... 
- if ( parameters != null ) { - for ( int i = 0; i < parameters.length; i++ ) { - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_name", parameters[i] ); - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_stream_name", Const.NVL( - parameterFieldNames[i], "" ) ); - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_value", Const.NVL( - parameterValues[i], "" ) ); - } - } - rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_all_parameters", passingAllParameters ); - } catch ( KettleDatabaseException dbe ) { - throw new KettleException( - "Unable to save job entry of type job to the repository with id_job=" + id_job, dbe ); + public void setDirectory(String directory) { + this.directory = directory; } - } - - public Result execute( Result result, int nr ) throws KettleException { - result.setEntryNr( nr ); - - LogChannelFileWriter logChannelFileWriter = null; - - LogLevel jobLogLevel = parentJob.getLogLevel(); - if ( setLogfile ) { - String realLogFilename = environmentSubstitute( getLogFilename() ); - // We need to check here the log filename - // if we do not have one, we must fail - if ( Const.isEmpty( realLogFilename ) ) { - logError( BaseMessages.getString( PKG, "JobJob.Exception.LogFilenameMissing" ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } - // create parent folder? - if ( !createParentFolder( realLogFilename ) ) { - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } - try { - logChannelFileWriter = - new LogChannelFileWriter( - this.getLogChannelId(), KettleVFS.getFileObject( realLogFilename ), setAppendLogfile ); - logChannelFileWriter.startLogging(); - } catch ( KettleException e ) { - logError( "Unable to open file appender for file [" + getLogFilename() + "] : " + e.toString() ); - logError( Const.getStackTracker( e ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } - jobLogLevel = logFileLevel; + public boolean isPassingExport() { + return passingExport; } - // Figure out the remote slave server... - // - SlaveServer remoteSlaveServer = null; - if ( !Const.isEmpty( remoteSlaveServerName ) ) { - String realRemoteSlaveServerName = environmentSubstitute( remoteSlaveServerName ); - remoteSlaveServer = parentJob.getJobMeta().findSlaveServer( realRemoteSlaveServerName ); - if ( remoteSlaveServer == null ) { - throw new KettleException( BaseMessages.getString( - PKG, "JobJob.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName ) ); - } + public void setPassingExport(boolean passingExport) { + this.passingExport = passingExport; } - try { - // First load the job, outside of the loop... - if ( parentJob.getJobMeta() != null ) { - // reset the internal variables again. - // Maybe we should split up the variables even more like in UNIX shells. - // The internal variables need to be reset to be able use them properly - // in 2 sequential sub jobs. - parentJob.getJobMeta().setInternalKettleVariables(); - } - - // Explain what we are loading... 
- // - switch ( specificationMethod ) { - case REPOSITORY_BY_NAME: - if ( log.isDetailed() ) { - logDetailed( "Loading job from repository : [" - + directory + " : " + environmentSubstitute( jobname ) + "]" ); - } - break; - case FILENAME: - if ( log.isDetailed() ) { - logDetailed( "Loading job from XML file : [" + environmentSubstitute( filename ) + "]" ); - } - break; - case REPOSITORY_BY_REFERENCE: - if ( log.isDetailed() ) { - logDetailed( "Loading job from repository by reference : [" + jobObjectId + "]" ); - } - break; - default: - break; - } - - JobMeta jobMeta = getJobMeta( rep, this ); - - // Verify that we loaded something, complain if we did not... - // - if ( jobMeta == null ) { - throw new KettleException( - "Unable to load the job: please specify the name and repository directory OR a filename" ); - } - - verifyRecursiveExecution( parentJob, jobMeta ); - - int iteration = 0; - String[] args1 = arguments; - // no arguments? Check the parent jobs arguments - if ( args1 == null || args1.length == 0 ) { - args1 = parentJob.getArguments(); - } - - copyVariablesFrom( parentJob ); - setParentVariableSpace( parentJob ); - - // - // For the moment only do variable translation at the start of a job, not - // for every input row (if that would be switched on) - // - String[] args = null; - if ( args1 != null ) { - args = new String[args1.length]; - for ( int idx = 0; idx < args1.length; idx++ ) { - args[idx] = environmentSubstitute( args1[idx] ); + + public String getLogFilename() { + String retval = ""; + if (setLogfile) { + retval += logfile == null ? "" : logfile; + Calendar cal = Calendar.getInstance(); + if (addDate) { + SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd"); + retval += "_" + sdf.format(cal.getTime()); + } + if (addTime) { + SimpleDateFormat sdf = new SimpleDateFormat("HHmmss"); + retval += "_" + sdf.format(cal.getTime()); + } + if (logext != null && logext.length() > 0) { + retval += "." + logext; + } } - } + return retval; + } - RowMetaAndData resultRow = null; - boolean first = true; - List rows = new ArrayList( result.getRows() ); + public String getXML() { + StringBuffer retval = new StringBuffer(200); - while ( ( first && !execPerRow ) - || ( execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0 ) ) { - first = false; + retval.append(super.getXML()); - // Clear the result rows of the result - // Otherwise we double the amount of rows every iteration in the simple cases. + // specificationMethod + // + retval.append(" ").append( + XMLHandler.addTagValue("specification_method", specificationMethod == null ? null : specificationMethod + .getCode())); + retval.append(" ").append( + XMLHandler.addTagValue("job_object_id", jobObjectId == null ? null : jobObjectId.toString())); + // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same + // repository. // - if ( execPerRow ) { - result.getRows().clear(); + if (rep != null && jobObjectId != null) { + try { + RepositoryObject objectInformation = rep.getObjectInformation(jobObjectId, RepositoryObjectType.JOB); + if (objectInformation != null) { + jobname = objectInformation.getName(); + directory = objectInformation.getRepositoryDirectory().getPath(); + } + } catch (KettleException e) { + // Ignore object reference problems. It simply means that the reference is no longer valid. 
+ } } + retval.append(" ").append(XMLHandler.addTagValue("filename", filename)); + retval.append(" ").append(XMLHandler.addTagValue("jobname", jobname)); - if ( rows != null && execPerRow ) { - resultRow = rows.get( iteration ); - } else { - resultRow = null; + if (directory != null) { + retval.append(" ").append(XMLHandler.addTagValue("directory", directory)); + } else if (directoryPath != null) { + retval.append(" ").append(XMLHandler.addTagValue("directory", directoryPath)); + } + retval.append(" ").append(XMLHandler.addTagValue("arg_from_previous", argFromPrevious)); + retval.append(" ").append(XMLHandler.addTagValue("params_from_previous", paramsFromPrevious)); + retval.append(" ").append(XMLHandler.addTagValue("exec_per_row", execPerRow)); + retval.append(" ").append(XMLHandler.addTagValue("set_logfile", setLogfile)); + retval.append(" ").append(XMLHandler.addTagValue("logfile", logfile)); + retval.append(" ").append(XMLHandler.addTagValue("logext", logext)); + retval.append(" ").append(XMLHandler.addTagValue("add_date", addDate)); + retval.append(" ").append(XMLHandler.addTagValue("add_time", addTime)); + retval.append(" ").append( + XMLHandler.addTagValue("loglevel", logFileLevel != null ? logFileLevel.getCode() : DEFAULT_LOG_LEVEL + .getCode())); + retval.append(" ").append(XMLHandler.addTagValue("slave_server_name", remoteSlaveServerName)); + retval.append(" ").append(XMLHandler.addTagValue("wait_until_finished", waitingToFinish)); + retval.append(" ").append(XMLHandler.addTagValue("follow_abort_remote", followingAbortRemotely)); + retval.append(" ").append(XMLHandler.addTagValue("expand_remote_job", expandingRemoteJob)); + retval.append(" ").append(XMLHandler.addTagValue("create_parent_folder", createParentFolder)); + retval.append(" ").append(XMLHandler.addTagValue("pass_export", passingExport)); + + if (arguments != null) { + for (int i = 0; i < arguments.length; i++) { + // This is a very very bad way of making an XML file, don't use it (or + // copy it). Sven Boden + retval.append(" ").append(XMLHandler.addTagValue("argument" + i, arguments[i])); + } } - NamedParams namedParam = new NamedParamsDefault(); + if (parameters != null) { + retval.append(" ").append(XMLHandler.openTag("parameters")); - // First (optionally) copy all the parameter values from the parent job - // - if ( paramsFromPrevious ) { - String[] parentParameters = parentJob.listParameters(); - for ( int idx = 0; idx < parentParameters.length; idx++ ) { - String par = parentParameters[idx]; - String def = parentJob.getParameterDefault( par ); - String val = parentJob.getParameterValue( par ); - String des = parentJob.getParameterDescription( par ); - - namedParam.addParameterDefinition( par, def, des ); - namedParam.setParameterValue( par, val ); - } + retval.append(" ").append(XMLHandler.addTagValue("pass_all_parameters", passingAllParameters)); + + for (int i = 0; i < parameters.length; i++) { + // This is a better way of making the XML file than the arguments. 
+ retval.append(" ").append(XMLHandler.openTag("parameter")); + + retval.append(" ").append(XMLHandler.addTagValue("name", parameters[i])); + retval.append(" ").append(XMLHandler.addTagValue("stream_name", parameterFieldNames[i])); + retval.append(" ").append(XMLHandler.addTagValue("value", parameterValues[i])); + + retval.append(" ").append(XMLHandler.closeTag("parameter")); + } + retval.append(" ").append(XMLHandler.closeTag("parameters")); } + retval.append(" ").append(XMLHandler.addTagValue("set_append_logfile", setAppendLogfile)); - // Now add those parameter values specified by the user in the job entry - // - if ( parameters != null ) { - for ( int idx = 0; idx < parameters.length; idx++ ) { - if ( !Const.isEmpty( parameters[idx] ) ) { - - // If it's not yet present in the parent job, add it... - // - if ( Const.indexOfString( parameters[idx], namedParam.listParameters() ) < 0 ) { - // We have a parameter - try { - namedParam.addParameterDefinition( parameters[idx], "", "Job entry runtime" ); - } catch ( DuplicateParamException e ) { - // Should never happen - // - logError( "Duplicate parameter definition for " + parameters[idx] ); - } - } + return retval.toString(); + } - if ( Const.isEmpty( Const.trim( parameterFieldNames[idx] ) ) ) { - namedParam.setParameterValue( parameters[idx], Const.NVL( - environmentSubstitute( parameterValues[idx] ), "" ) ); - } else { - // something filled in, in the field column... - // - String value = ""; - if ( resultRow != null ) { - value = resultRow.getString( parameterFieldNames[idx], "" ); - } - namedParam.setParameterValue( parameters[idx], value ); - } + private void checkObjectLocationSpecificationMethod() { + if (specificationMethod == null) { + // Backward compatibility + // + // Default = Filename + // + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + + if (!Const.isEmpty(filename)) { + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + } else if (jobObjectId != null) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } else if (!Const.isEmpty(jobname)) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; } - } } + } - Result oneResult = new Result(); + public void loadXML(Node entrynode, List databases, List slaveServers, + Repository rep, IMetaStore metaStore) throws KettleXMLException { + try { + super.loadXML(entrynode, databases, slaveServers); - List sourceRows = null; + String method = XMLHandler.getTagValue(entrynode, "specification_method"); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode(method); - if ( execPerRow ) { - // Execute for each input row + String jobId = XMLHandler.getTagValue(entrynode, "job_object_id"); + jobObjectId = Const.isEmpty(jobId) ? 
null : new StringObjectId(jobId); + filename = XMLHandler.getTagValue(entrynode, "filename"); + jobname = XMLHandler.getTagValue(entrynode, "jobname"); - if ( argFromPrevious ) { - // Copy the input row to the (command line) arguments + if (rep != null && rep.isConnected() && !Const.isEmpty(jobname)) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } - args = null; - if ( resultRow != null ) { - args = new String[resultRow.size()]; - for ( int i = 0; i < resultRow.size(); i++ ) { - args[i] = resultRow.getString( i, null ); - } + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "arg_from_previous")); + paramsFromPrevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "params_from_previous")); + execPerRow = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "exec_per_row")); + setLogfile = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "set_logfile")); + addDate = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "add_date")); + addTime = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "add_time")); + logfile = XMLHandler.getTagValue(entrynode, "logfile"); + logext = XMLHandler.getTagValue(entrynode, "logext"); + logFileLevel = LogLevel.getLogLevelForCode(XMLHandler.getTagValue(entrynode, "loglevel")); + setAppendLogfile = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "set_append_logfile")); + remoteSlaveServerName = XMLHandler.getTagValue(entrynode, "slave_server_name"); + passingExport = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "pass_export")); + directory = XMLHandler.getTagValue(entrynode, "directory"); + createParentFolder = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "create_parent_folder")); + + String wait = XMLHandler.getTagValue(entrynode, "wait_until_finished"); + if (Const.isEmpty(wait)) { + waitingToFinish = true; + } else { + waitingToFinish = "Y".equalsIgnoreCase(wait); } - } else { - // Just pass a single row - List newList = new ArrayList(); - newList.add( resultRow ); - sourceRows = newList; - } - - if ( paramsFromPrevious ) { // Copy the input the parameters - - if ( parameters != null ) { - for ( int idx = 0; idx < parameters.length; idx++ ) { - if ( !Const.isEmpty( parameters[idx] ) ) { - // We have a parameter - if ( Const.isEmpty( Const.trim( parameterFieldNames[idx] ) ) ) { - namedParam.setParameterValue( parameters[idx], Const.NVL( - environmentSubstitute( parameterValues[idx] ), "" ) ); - } else { - String fieldValue = ""; - - if ( resultRow != null ) { - fieldValue = resultRow.getString( parameterFieldNames[idx], "" ); - } - // Get the value from the input stream - namedParam.setParameterValue( parameters[idx], Const.NVL( fieldValue, "" ) ); - } - } - } + + followingAbortRemotely = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "follow_abort_remote")); + expandingRemoteJob = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "expand_remote_job")); + + // How many arguments? + int argnr = 0; + while (XMLHandler.getTagValue(entrynode, "argument" + argnr) != null) { + argnr++; } - } - } else { - if ( argFromPrevious ) { - // Only put the first Row on the arguments - args = null; - if ( resultRow != null ) { - args = new String[resultRow.size()]; - for ( int i = 0; i < resultRow.size(); i++ ) { - args[i] = resultRow.getString( i, null ); - } + allocateArgs(argnr); + + // Read them all... 
This is a very BAD way to do it by the way. Sven + // Boden. + for (int a = 0; a < argnr; a++) { + arguments[a] = XMLHandler.getTagValue(entrynode, "argument" + a); } - } else { - // Keep it as it was... - sourceRows = result.getRows(); - } - - if ( paramsFromPrevious ) { // Copy the input the parameters - - if ( parameters != null ) { - for ( int idx = 0; idx < parameters.length; idx++ ) { - if ( !Const.isEmpty( parameters[idx] ) ) { - // We have a parameter - if ( Const.isEmpty( Const.trim( parameterFieldNames[idx] ) ) ) { - namedParam.setParameterValue( parameters[idx], Const.NVL( - environmentSubstitute( parameterValues[idx] ), "" ) ); - } else { - String fieldValue = ""; - - if ( resultRow != null ) { - fieldValue = resultRow.getString( parameterFieldNames[idx], "" ); - } - // Get the value from the input stream - namedParam.setParameterValue( parameters[idx], Const.NVL( fieldValue, "" ) ); - } - } - } + + Node parametersNode = XMLHandler.getSubNode(entrynode, "parameters"); + + String passAll = XMLHandler.getTagValue(parametersNode, "pass_all_parameters"); + passingAllParameters = Const.isEmpty(passAll) || "Y".equalsIgnoreCase(passAll); + + int nrParameters = XMLHandler.countNodes(parametersNode, "parameter"); + allocateParams(nrParameters); + + for (int i = 0; i < nrParameters; i++) { + Node knode = XMLHandler.getSubNodeByNr(parametersNode, "parameter", i); + + parameters[i] = XMLHandler.getTagValue(knode, "name"); + parameterFieldNames[i] = XMLHandler.getTagValue(knode, "stream_name"); + parameterValues[i] = XMLHandler.getTagValue(knode, "value"); } - } + } catch (KettleXMLException xe) { + throw new KettleXMLException("Unable to load 'job' job entry from XML node", xe); } + } - if ( remoteSlaveServer == null ) { - // Local execution... - // - - // Create a new job - // - job = new Job( rep, jobMeta, this ); - job.setParentJob( parentJob ); - job.setLogLevel( jobLogLevel ); - job.shareVariablesWith( this ); - job.setInternalKettleVariables( this ); - job.copyParametersFrom( jobMeta ); - job.setInteractive( parentJob.isInteractive() ); - if ( job.isInteractive() ) { - job.getJobEntryListeners().addAll( parentJob.getJobEntryListeners() ); - } - - // Pass the socket repository all around. - // - job.setSocketRepository( parentJob.getSocketRepository() ); - - // Set the parameters calculated above on this instance. - // - job.clearParameters(); - String[] parameterNames = job.listParameters(); - for ( int idx = 0; idx < parameterNames.length; idx++ ) { - // Grab the parameter value set in the job entry + /** + * Load the jobentry from repository + */ + public void loadRep(Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List databases, + List slaveServers) throws KettleException { + try { + String method = rep.getJobEntryAttributeString(id_jobentry, "specification_method"); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode(method); + String jobId = rep.getJobEntryAttributeString(id_jobentry, "job_object_id"); + jobObjectId = Const.isEmpty(jobId) ? 
null : new StringObjectId(jobId); + jobname = rep.getJobEntryAttributeString(id_jobentry, "name"); + directory = rep.getJobEntryAttributeString(id_jobentry, "dir_path"); + filename = rep.getJobEntryAttributeString(id_jobentry, "file_name"); + + // Backward compatibility check for object specification // - String thisValue = namedParam.getParameterValue( parameterNames[idx] ); - if ( !Const.isEmpty( thisValue ) ) { - // Set the value as specified by the user in the job entry - // - job.setParameterValue( parameterNames[idx], thisValue ); - } else { - // See if the parameter had a value set in the parent job... - // This value should pass down to the sub-job if that's what we - // opted to do. - // - if ( isPassingAllParameters() ) { - String parentValue = parentJob.getParameterValue( parameterNames[idx] ); - if ( !Const.isEmpty( parentValue ) ) { - job.setParameterValue( parameterNames[idx], parentValue ); - } - } + checkObjectLocationSpecificationMethod(); + + argFromPrevious = rep.getJobEntryAttributeBoolean(id_jobentry, "arg_from_previous"); + paramsFromPrevious = rep.getJobEntryAttributeBoolean(id_jobentry, "params_from_previous"); + execPerRow = rep.getJobEntryAttributeBoolean(id_jobentry, "exec_per_row"); + setLogfile = rep.getJobEntryAttributeBoolean(id_jobentry, "set_logfile"); + addDate = rep.getJobEntryAttributeBoolean(id_jobentry, "add_date"); + addTime = rep.getJobEntryAttributeBoolean(id_jobentry, "add_time"); + logfile = rep.getJobEntryAttributeString(id_jobentry, "logfile"); + logext = rep.getJobEntryAttributeString(id_jobentry, "logext"); + logFileLevel = LogLevel.getLogLevelForCode(rep.getJobEntryAttributeString(id_jobentry, "loglevel")); + setAppendLogfile = rep.getJobEntryAttributeBoolean(id_jobentry, "set_append_logfile"); + remoteSlaveServerName = rep.getJobEntryAttributeString(id_jobentry, "slave_server_name"); + passingExport = rep.getJobEntryAttributeBoolean(id_jobentry, "pass_export"); + waitingToFinish = rep.getJobEntryAttributeBoolean(id_jobentry, "wait_until_finished", true); + followingAbortRemotely = rep.getJobEntryAttributeBoolean(id_jobentry, "follow_abort_remote"); + expandingRemoteJob = rep.getJobEntryAttributeBoolean(id_jobentry, "expand_remote_job"); + createParentFolder = rep.getJobEntryAttributeBoolean(id_jobentry, "create_parent_folder"); + + // How many arguments? + int argnr = rep.countNrJobEntryAttributes(id_jobentry, "argument"); + allocateArgs(argnr); + + // Read all arguments ... + for (int a = 0; a < argnr; a++) { + arguments[a] = rep.getJobEntryAttributeString(id_jobentry, a, "argument"); } - } - job.activateParameters(); - // Set the source rows we calculated above... - // - job.setSourceRows( sourceRows ); + // How many arguments? + int parameternr = rep.countNrJobEntryAttributes(id_jobentry, "parameter_name"); + allocateParams(parameternr); - // Don't forget the logging... - job.beginProcessing(); + // Read all parameters ... + for (int a = 0; a < parameternr; a++) { + parameters[a] = rep.getJobEntryAttributeString(id_jobentry, a, "parameter_name"); + parameterFieldNames[a] = rep.getJobEntryAttributeString(id_jobentry, a, "parameter_stream_name"); + parameterValues[a] = rep.getJobEntryAttributeString(id_jobentry, a, "parameter_value"); + } - // Link the job with the sub-job - parentJob.getJobTracker().addJobTracker( job.getJobTracker() ); + passingAllParameters = rep.getJobEntryAttributeBoolean(id_jobentry, "pass_all_parameters", true); - // Link both ways! 
- job.getJobTracker().setParentJobTracker( parentJob.getJobTracker() ); + } catch (KettleDatabaseException dbe) { + throw new KettleException("Unable to load job entry of type 'job' from the repository with id_jobentry=" + + id_jobentry, dbe); + } + } - if ( parentJob.getJobMeta().isBatchIdPassed() ) { - job.setPassedBatchId( parentJob.getBatchId() ); - } + // Save the attributes of this job entry + // + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_job) throws KettleException { + try { + rep.saveJobEntryAttribute(id_job, getObjectId(), "specification_method", specificationMethod == null + ? null : specificationMethod.getCode()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "job_object_id", jobObjectId == null ? null : jobObjectId + .toString()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "name", getJobName()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "dir_path", getDirectory() != null ? getDirectory() : ""); + rep.saveJobEntryAttribute(id_job, getObjectId(), "file_name", filename); + rep.saveJobEntryAttribute(id_job, getObjectId(), "arg_from_previous", argFromPrevious); + rep.saveJobEntryAttribute(id_job, getObjectId(), "params_from_previous", paramsFromPrevious); + rep.saveJobEntryAttribute(id_job, getObjectId(), "exec_per_row", execPerRow); + rep.saveJobEntryAttribute(id_job, getObjectId(), "set_logfile", setLogfile); + rep.saveJobEntryAttribute(id_job, getObjectId(), "add_date", addDate); + rep.saveJobEntryAttribute(id_job, getObjectId(), "add_time", addTime); + rep.saveJobEntryAttribute(id_job, getObjectId(), "logfile", logfile); + rep.saveJobEntryAttribute(id_job, getObjectId(), "logext", logext); + rep.saveJobEntryAttribute(id_job, getObjectId(), "set_append_logfile", setAppendLogfile); + rep.saveJobEntryAttribute(id_job, getObjectId(), "loglevel", logFileLevel != null + ? logFileLevel.getCode() : JobEntryJob.DEFAULT_LOG_LEVEL.getCode()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "slave_server_name", remoteSlaveServerName); + rep.saveJobEntryAttribute(id_job, getObjectId(), "pass_export", passingExport); + rep.saveJobEntryAttribute(id_job, getObjectId(), "wait_until_finished", waitingToFinish); + rep.saveJobEntryAttribute(id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely); + rep.saveJobEntryAttribute(id_job, getObjectId(), "expand_remote_job", expandingRemoteJob); + rep.saveJobEntryAttribute(id_job, getObjectId(), "create_parent_folder", createParentFolder); + + // save the arguments... + if (arguments != null) { + for (int i = 0; i < arguments.length; i++) { + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "argument", arguments[i]); + } + } - job.setArguments( args ); + // save the parameters... + if (parameters != null) { + for (int i = 0; i < parameters.length; i++) { + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "parameter_name", parameters[i]); + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "parameter_stream_name", Const.NVL( + parameterFieldNames[i], "")); + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "parameter_value", Const.NVL( + parameterValues[i], "")); + } + } - // Inform the parent job we started something here... 
- // - for ( DelegationListener delegationListener : parentJob.getDelegationListeners() ) { - // TODO: copy some settings in the job execution configuration, not strictly needed - // but the execution configuration information is useful in case of a job re-start - // - delegationListener.jobDelegationStarted( job, new JobExecutionConfiguration() ); - } - - JobEntryJobRunner runner = new JobEntryJobRunner( job, result, nr, log ); - Thread jobRunnerThread = new Thread( runner ); - // PDI-6518 - // added UUID to thread name, otherwise threads do share names if jobs entries are executed in parallel in a - // parent job - // if that happens, contained transformations start closing each other's connections - jobRunnerThread.setName( Const.NVL( job.getJobMeta().getName(), job.getJobMeta().getFilename() ) - + " UUID: " + UUID.randomUUID().toString() ); - jobRunnerThread.start(); - - // Keep running until we're done. - // - while ( !runner.isFinished() && !parentJob.isStopped() ) { + rep.saveJobEntryAttribute(id_job, getObjectId(), "pass_all_parameters", passingAllParameters); + } catch (KettleDatabaseException dbe) { + throw new KettleException( + "Unable to save job entry of type job to the repository with id_job=" + id_job, dbe); + } + } + + public Result execute(Result result, int nr) throws KettleException { + result.setEntryNr(nr); + + LogChannelFileWriter logChannelFileWriter = null; + + LogLevel jobLogLevel = parentJob.getLogLevel(); + if (setLogfile) { + String realLogFilename = environmentSubstitute(getLogFilename()); + // We need to check here the log filename + // if we do not have one, we must fail + if (Const.isEmpty(realLogFilename)) { + logError(BaseMessages.getString(PKG, "JobJob.Exception.LogFilenameMissing")); + result.setNrErrors(1); + result.setResult(false); + return result; + } + + // create parent folder? + if (!createParentFolder(realLogFilename)) { + result.setNrErrors(1); + result.setResult(false); + return result; + } try { - Thread.sleep( 0, 1 ); - } catch ( InterruptedException e ) { - // Ignore + logChannelFileWriter = + new LogChannelFileWriter( + this.getLogChannelId(), KettleVFS.getFileObject(realLogFilename), setAppendLogfile); + logChannelFileWriter.startLogging(); + } catch (KettleException e) { + logError("Unable to open file appender for file [" + getLogFilename() + "] : " + e.toString()); + logError(Const.getStackTracker(e)); + result.setNrErrors(1); + result.setResult(false); + return result; } - } + jobLogLevel = logFileLevel; + } - // if the parent-job was stopped, stop the sub-job too... - if ( parentJob.isStopped() ) { - job.stopAll(); - runner.waitUntilFinished(); // Wait until finished! - } + // Figure out the remote slave server... + // + SlaveServer remoteSlaveServer = null; + if (!Const.isEmpty(remoteSlaveServerName)) { + String realRemoteSlaveServerName = environmentSubstitute(remoteSlaveServerName); + remoteSlaveServer = parentJob.getJobMeta().findSlaveServer(realRemoteSlaveServerName); + if (remoteSlaveServer == null) { + throw new KettleException(BaseMessages.getString( + PKG, "JobJob.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName)); + } + } + try { + // First load the job, outside of the loop... + if (parentJob.getJobMeta() != null) { + // reset the internal variables again. + // Maybe we should split up the variables even more like in UNIX shells. + // The internal variables need to be reset to be able use them properly + // in 2 sequential sub jobs. 
+ parentJob.getJobMeta().setInternalKettleVariables(); + } - oneResult = runner.getResult(); + // Explain what we are loading... + // + switch (specificationMethod) { + case REPOSITORY_BY_NAME: + if (log.isDetailed()) { + logDetailed("Loading job from repository : [" + + directory + " : " + environmentSubstitute(jobname) + "]"); + } + break; + case FILENAME: + if (log.isDetailed()) { + logDetailed("Loading job from XML file : [" + environmentSubstitute(filename) + "]"); + } + break; + case REPOSITORY_BY_REFERENCE: + if (log.isDetailed()) { + logDetailed("Loading job from repository by reference : [" + jobObjectId + "]"); + } + break; + default: + break; + } - } else { + JobMeta jobMeta = getJobMeta(rep, this); - // Make sure we can parameterize the slave server connection - // - remoteSlaveServer.shareVariablesWith( this ); - - // Remote execution... - // - JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration(); - jobExecutionConfiguration.setPreviousResult( result.lightClone() ); // lightClone() because rows are - // overwritten in next line. - jobExecutionConfiguration.getPreviousResult().setRows( sourceRows ); - jobExecutionConfiguration.setArgumentStrings( args ); - jobExecutionConfiguration.setVariables( this ); - jobExecutionConfiguration.setRemoteServer( remoteSlaveServer ); - jobExecutionConfiguration.setRepository( rep ); - jobExecutionConfiguration.setLogLevel( jobLogLevel ); - jobExecutionConfiguration.setPassingExport( passingExport ); - jobExecutionConfiguration.setExpandingRemoteJob( expandingRemoteJob ); - for ( String param : namedParam.listParameters() ) { - String defValue = namedParam.getParameterDefault( param ); - String value = namedParam.getParameterValue( param ); - jobExecutionConfiguration.getParams().put( param, Const.NVL( value, defValue ) ); - } - if ( parentJob.getJobMeta().isBatchIdPassed() ) { - jobExecutionConfiguration.setPassedBatchId( parentJob.getBatchId() ); - } - - // Send the XML over to the slave server - // Also start the job over there... - // - String carteObjectId = null; - try { - carteObjectId = Job.sendToSlaveServer( jobMeta, jobExecutionConfiguration, rep, metaStore ); - } catch ( KettleException e ) { - // Perhaps the job exists on the remote server, carte is down, etc. - // This is an abort situation, stop the parent job... - // We want this in case we are running in parallel. The other job - // entries can stop running now. + // Verify that we loaded something, complain if we did not... // - parentJob.stopAll(); + if (jobMeta == null) { + throw new KettleException( + "Unable to load the job: please specify the name and repository directory OR a filename"); + } - // Pass the exception along - // - throw e; - } + verifyRecursiveExecution(parentJob, jobMeta); - // Now start the monitoring... - // - SlaveServerJobStatus jobStatus = null; - while ( !parentJob.isStopped() && waitingToFinish ) { - try { - jobStatus = remoteSlaveServer.getJobStatus( jobMeta.getName(), carteObjectId, 0 ); - if ( jobStatus.getResult() != null ) { - // The job is finished, get the result... - // - oneResult = jobStatus.getResult(); - break; - } - } catch ( Exception e1 ) { - logError( "Unable to contact slave server [" - + remoteSlaveServer + "] to verify the status of job [" + jobMeta.getName() + "]", e1 ); - oneResult.setNrErrors( 1L ); - break; // Stop looking too, chances are too low the server will - // come back on-line + int iteration = 0; + String[] args1 = arguments; + // no arguments? 
Check the parent jobs arguments + if (args1 == null || args1.length == 0) { + args1 = parentJob.getArguments(); } - // sleep for 1 second - try { - Thread.sleep( 1000 ); - } catch ( InterruptedException e ) { - // Ignore - } - } + copyVariablesFrom(parentJob); + setParentVariableSpace(parentJob); - if ( !waitingToFinish ) { - // Since the job was posted successfully, the result is true... // - oneResult = new Result(); - oneResult.setResult( true ); - } + // For the moment only do variable translation at the start of a job, not + // for every input row (if that would be switched on) + // + String[] args = null; + if (args1 != null) { + args = new String[args1.length]; + for (int idx = 0; idx < args1.length; idx++) { + args[idx] = environmentSubstitute(args1[idx]); + } + } - if ( parentJob.isStopped() ) { - try { - // See if we have a status and if we need to stop the remote - // execution here... - // - if ( jobStatus == null || jobStatus.isRunning() ) { - // Try a remote abort ... + RowMetaAndData resultRow = null; + boolean first = true; + List rows = new ArrayList(result.getRows()); + + while ((first && !execPerRow) + || (execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0)) { + first = false; + + // Clear the result rows of the result + // Otherwise we double the amount of rows every iteration in the simple cases. + // + if (execPerRow) { + result.getRows().clear(); + } + + if (rows != null && execPerRow) { + resultRow = rows.get(iteration); + } else { + resultRow = null; + } + + NamedParams namedParam = new NamedParamsDefault(); + + // First (optionally) copy all the parameter values from the parent job + // + if (paramsFromPrevious) { + String[] parentParameters = parentJob.listParameters(); + for (int idx = 0; idx < parentParameters.length; idx++) { + String par = parentParameters[idx]; + String def = parentJob.getParameterDefault(par); + String val = parentJob.getParameterValue(par); + String des = parentJob.getParameterDescription(par); + + namedParam.addParameterDefinition(par, def, des); + namedParam.setParameterValue(par, val); + } + } + + // Now add those parameter values specified by the user in the job entry + // + if (parameters != null) { + for (int idx = 0; idx < parameters.length; idx++) { + if (!Const.isEmpty(parameters[idx])) { + + // If it's not yet present in the parent job, add it... + // + if (Const.indexOfString(parameters[idx], namedParam.listParameters()) < 0) { + // We have a parameter + try { + namedParam.addParameterDefinition(parameters[idx], "", "Job entry runtime"); + } catch (DuplicateParamException e) { + // Should never happen + // + logError("Duplicate parameter definition for " + parameters[idx]); + } + } + + if (Const.isEmpty(Const.trim(parameterFieldNames[idx]))) { + namedParam.setParameterValue(parameters[idx], Const.NVL( + environmentSubstitute(parameterValues[idx]), "")); + } else { + // something filled in, in the field column... 
+ // + String value = ""; + if (resultRow != null) { + value = resultRow.getString(parameterFieldNames[idx], ""); + } + namedParam.setParameterValue(parameters[idx], value); + } + } + } + } + + Result oneResult = new Result(); + + List sourceRows = null; + + if (execPerRow) { + // Execute for each input row + + if (argFromPrevious) { + // Copy the input row to the (command line) arguments + + args = null; + if (resultRow != null) { + args = new String[resultRow.size()]; + for (int i = 0; i < resultRow.size(); i++) { + args[i] = resultRow.getString(i, null); + } + } + } else { + // Just pass a single row + List newList = new ArrayList(); + newList.add(resultRow); + sourceRows = newList; + } + + if (paramsFromPrevious) { // Copy the input the parameters + + if (parameters != null) { + for (int idx = 0; idx < parameters.length; idx++) { + if (!Const.isEmpty(parameters[idx])) { + // We have a parameter + if (Const.isEmpty(Const.trim(parameterFieldNames[idx]))) { + namedParam.setParameterValue(parameters[idx], Const.NVL( + environmentSubstitute(parameterValues[idx]), "")); + } else { + String fieldValue = ""; + + if (resultRow != null) { + fieldValue = resultRow.getString(parameterFieldNames[idx], ""); + } + // Get the value from the input stream + namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, "")); + } + } + } + } + } + } else { + if (argFromPrevious) { + // Only put the first Row on the arguments + args = null; + if (resultRow != null) { + args = new String[resultRow.size()]; + for (int i = 0; i < resultRow.size(); i++) { + args[i] = resultRow.getString(i, null); + } + } + } else { + // Keep it as it was... + sourceRows = result.getRows(); + } + + if (paramsFromPrevious) { // Copy the input the parameters + + if (parameters != null) { + for (int idx = 0; idx < parameters.length; idx++) { + if (!Const.isEmpty(parameters[idx])) { + // We have a parameter + if (Const.isEmpty(Const.trim(parameterFieldNames[idx]))) { + namedParam.setParameterValue(parameters[idx], Const.NVL( + environmentSubstitute(parameterValues[idx]), "")); + } else { + String fieldValue = ""; + + if (resultRow != null) { + fieldValue = resultRow.getString(parameterFieldNames[idx], ""); + } + // Get the value from the input stream + namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, "")); + } + } + } + } + } + } + + if (remoteSlaveServer == null) { + // Local execution... + // + + // Create a new job + // + job = new Job(rep, jobMeta, this); + job.setParentJob(parentJob); + job.setLogLevel(jobLogLevel); + job.shareVariablesWith(this); + job.setInternalKettleVariables(this); + job.copyParametersFrom(jobMeta); + job.setInteractive(parentJob.isInteractive()); + if (job.isInteractive()) { + job.getJobEntryListeners().addAll(parentJob.getJobEntryListeners()); + } + + // Pass the socket repository all around. + // + job.setSocketRepository(parentJob.getSocketRepository()); + + // Set the parameters calculated above on this instance. + // + job.clearParameters(); + String[] parameterNames = job.listParameters(); + for (int idx = 0; idx < parameterNames.length; idx++) { + // Grab the parameter value set in the job entry + // + String thisValue = namedParam.getParameterValue(parameterNames[idx]); + if (!Const.isEmpty(thisValue)) { + // Set the value as specified by the user in the job entry + // + job.setParameterValue(parameterNames[idx], thisValue); + } else { + // See if the parameter had a value set in the parent job... 
+ // This value should pass down to the sub-job if that's what we + // opted to do. + // + if (isPassingAllParameters()) { + String parentValue = parentJob.getParameterValue(parameterNames[idx]); + if (!Const.isEmpty(parentValue)) { + job.setParameterValue(parameterNames[idx], parentValue); + } + } + } + } + job.activateParameters(); + + // Set the source rows we calculated above... + // + job.setSourceRows(sourceRows); + + // Don't forget the logging... + job.beginProcessing(); + + // Link the job with the sub-job + parentJob.getJobTracker().addJobTracker(job.getJobTracker()); + + // Link both ways! + job.getJobTracker().setParentJobTracker(parentJob.getJobTracker()); + + if (parentJob.getJobMeta().isBatchIdPassed()) { + job.setPassedBatchId(parentJob.getBatchId()); + } + + job.setArguments(args); + + // Inform the parent job we started something here... + // + for (DelegationListener delegationListener : parentJob.getDelegationListeners()) { + // TODO: copy some settings in the job execution configuration, not strictly needed + // but the execution configuration information is useful in case of a job re-start + // + delegationListener.jobDelegationStarted(job, new JobExecutionConfiguration()); + } + + JobEntryJobRunner runner = new JobEntryJobRunner(job, result, nr, log); + Thread jobRunnerThread = new Thread(runner); + // PDI-6518 + // added UUID to thread name, otherwise threads do share names if jobs entries are executed in parallel in a + // parent job + // if that happens, contained transformations start closing each other's connections + jobRunnerThread.setName(Const.NVL(job.getJobMeta().getName(), job.getJobMeta().getFilename()) + + " UUID: " + UUID.randomUUID().toString()); + jobRunnerThread.start(); + + // Keep running until we're done. + // + while (!runner.isFinished() && !parentJob.isStopped()) { + try { + Thread.sleep(0, 1); + } catch (InterruptedException e) { + // Ignore + } + } + + // if the parent-job was stopped, stop the sub-job too... + if (parentJob.isStopped()) { + job.stopAll(); + runner.waitUntilFinished(); // Wait until finished! + } + + oneResult = runner.getResult(); + + } else { + + // Make sure we can parameterize the slave server connection + // + remoteSlaveServer.shareVariablesWith(this); + + // Remote execution... + // + JobExecutionConfiguration jobExecutionConfiguration = new JobExecutionConfiguration(); + jobExecutionConfiguration.setPreviousResult(result.lightClone()); // lightClone() because rows are + // overwritten in next line. + jobExecutionConfiguration.getPreviousResult().setRows(sourceRows); + jobExecutionConfiguration.setArgumentStrings(args); + jobExecutionConfiguration.setVariables(this); + jobExecutionConfiguration.setRemoteServer(remoteSlaveServer); + jobExecutionConfiguration.setRepository(rep); + jobExecutionConfiguration.setLogLevel(jobLogLevel); + jobExecutionConfiguration.setPassingExport(passingExport); + jobExecutionConfiguration.setExpandingRemoteJob(expandingRemoteJob); + for (String param : namedParam.listParameters()) { + String defValue = namedParam.getParameterDefault(param); + String value = namedParam.getParameterValue(param); + jobExecutionConfiguration.getParams().put(param, Const.NVL(value, defValue)); + } + if (parentJob.getJobMeta().isBatchIdPassed()) { + jobExecutionConfiguration.setPassedBatchId(parentJob.getBatchId()); + } + + // Send the XML over to the slave server + // Also start the job over there... 
+ // + String carteObjectId = null; + try { + carteObjectId = Job.sendToSlaveServer(jobMeta, jobExecutionConfiguration, rep, metaStore); + } catch (KettleException e) { + // Perhaps the job exists on the remote server, carte is down, etc. + // This is an abort situation, stop the parent job... + // We want this in case we are running in parallel. The other job + // entries can stop running now. + // + parentJob.stopAll(); + + // Pass the exception along + // + throw e; + } + + // Now start the monitoring... + // + SlaveServerJobStatus jobStatus = null; + while (!parentJob.isStopped() && waitingToFinish) { + try { + jobStatus = remoteSlaveServer.getJobStatus(jobMeta.getName(), carteObjectId, 0); + if (jobStatus.getResult() != null) { + // The job is finished, get the result... + // + oneResult = jobStatus.getResult(); + break; + } + } catch (Exception e1) { + logError("Unable to contact slave server [" + + remoteSlaveServer + "] to verify the status of job [" + jobMeta.getName() + "]", e1); + oneResult.setNrErrors(1L); + break; // Stop looking too, chances are too low the server will + // come back on-line + } + + // sleep for 1 second + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + // Ignore + } + } + + if (!waitingToFinish) { + // Since the job was posted successfully, the result is true... + // + oneResult = new Result(); + oneResult.setResult(true); + } + + if (parentJob.isStopped()) { + try { + // See if we have a status and if we need to stop the remote + // execution here... + // + if (jobStatus == null || jobStatus.isRunning()) { + // Try a remote abort ... + // + remoteSlaveServer.stopJob(jobMeta.getName(), carteObjectId); + } + } catch (Exception e1) { + logError("Unable to contact slave server [" + + remoteSlaveServer + "] to stop job [" + jobMeta.getName() + "]", e1); + oneResult.setNrErrors(1L); + break; // Stop looking too, chances are too low the server will + // come back on-line + } + } + + } + + result.clear(); // clear only the numbers, NOT the files or rows. + result.add(oneResult); + + // Set the result rows too, if any ... + if (!Const.isEmpty(oneResult.getRows())) { + result.setRows(new ArrayList(oneResult.getRows())); + } + + // if one of them fails (in the loop), increase the number of errors // - remoteSlaveServer.stopJob( jobMeta.getName(), carteObjectId ); - } - } catch ( Exception e1 ) { - logError( "Unable to contact slave server [" - + remoteSlaveServer + "] to stop job [" + jobMeta.getName() + "]", e1 ); - oneResult.setNrErrors( 1L ); - break; // Stop looking too, chances are too low the server will - // come back on-line + if (oneResult.getResult() == false) { + result.setNrErrors(result.getNrErrors() + 1); + } + + iteration++; } - } + } catch (KettleException ke) { + logError("Error running job entry 'job' : ", ke); + + result.setResult(false); + result.setNrErrors(1L); } - result.clear(); // clear only the numbers, NOT the files or rows. - result.add( oneResult ); + if (setLogfile) { + if (logChannelFileWriter != null) { + logChannelFileWriter.stopLogging(); + + ResultFile resultFile = + new ResultFile( + ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName()); + result.getResultFiles().put(resultFile.getFile().toString(), resultFile); - // Set the result rows too, if any ... - if ( !Const.isEmpty( oneResult.getRows() ) ) { - result.setRows( new ArrayList( oneResult.getRows() ) ); + // See if anything went wrong during file writing... 
+ // + if (logChannelFileWriter.getException() != null) { + logError("Unable to open log file [" + getLogFilename() + "] : "); + logError(Const.getStackTracker(logChannelFileWriter.getException())); + result.setNrErrors(1); + result.setResult(false); + return result; + } + } } - // if one of them fails (in the loop), increase the number of errors - // - if ( oneResult.getResult() == false ) { - result.setNrErrors( result.getNrErrors() + 1 ); + if (result.getNrErrors() > 0) { + result.setResult(false); + } else { + result.setResult(true); } - iteration++; - } + return result; + } - } catch ( KettleException ke ) { - logError( "Error running job entry 'job' : ", ke ); + private boolean createParentFolder(String filename) { + // Check for parent folder + FileObject parentfolder = null; + boolean resultat = true; + try { + // Get parent folder + parentfolder = KettleVFS.getFileObject(filename, this).getParent(); + if (!parentfolder.exists()) { + if (createParentFolder) { + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "JobJob.Log.ParentLogFolderNotExist", parentfolder + .getName().toString())); + } + parentfolder.createFolder(); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "JobJob.Log.ParentLogFolderCreated", parentfolder + .getName().toString())); + } + } else { + log.logError(BaseMessages.getString(PKG, "JobJob.Log.ParentLogFolderNotExist", parentfolder + .getName().toString())); + resultat = false; + } + } else { + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "JobJob.Log.ParentLogFolderExists", parentfolder + .getName().toString())); + } + } + } catch (Exception e) { + resultat = false; + log.logError(BaseMessages.getString(PKG, "JobJob.Error.ChekingParentLogFolderTitle"), BaseMessages + .getString(PKG, "JobJob.Error.ChekingParentLogFolder", parentfolder.getName().toString()), e); + } finally { + if (parentfolder != null) { + try { + parentfolder.close(); + parentfolder = null; + } catch (Exception ex) { + // Ignore + } + } + } - result.setResult( false ); - result.setNrErrors( 1L ); + return resultat; } - if ( setLogfile ) { - if ( logChannelFileWriter != null ) { - logChannelFileWriter.stopLogging(); + /** + * Make sure that we are not loading jobs recursively... + * + * @param parentJobMeta the parent job metadata + * @param jobMeta the job metadata + * @throws KettleException in case both jobs are loaded from the same source + */ + private void verifyRecursiveExecution(Job parentJob, JobMeta jobMeta) throws KettleException { + + if (parentJob == null) { + return; // OK! + } + + JobMeta parentJobMeta = parentJob.getJobMeta(); + + if (parentJobMeta.getName() == null && jobMeta.getName() != null) { + return; // OK + } + if (parentJobMeta.getName() != null && jobMeta.getName() == null) { + return; // OK as well. + } + + // Not from the repository? 
just verify the filename + // + if (jobMeta.getFilename() != null && jobMeta.getFilename().equals(parentJobMeta.getFilename())) { + throw new KettleException(BaseMessages.getString(PKG, "JobJobError.Recursive", jobMeta.getFilename())); + } - ResultFile resultFile = - new ResultFile( - ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName() ); - result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); + // Different directories: OK + if (parentJobMeta.getRepositoryDirectory() == null && jobMeta.getRepositoryDirectory() != null) { + return; + } + if (parentJobMeta.getRepositoryDirectory() != null && jobMeta.getRepositoryDirectory() == null) { + return; + } + if (jobMeta.getRepositoryDirectory().getObjectId() != parentJobMeta.getRepositoryDirectory().getObjectId()) { + return; + } - // See if anything went wrong during file writing... + // Same names, same directories : loaded from same location in the + // repository: + // --> recursive loading taking place! // - if ( logChannelFileWriter.getException() != null ) { - logError( "Unable to open log file [" + getLogFilename() + "] : " ); - logError( Const.getStackTracker( logChannelFileWriter.getException() ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; + if (parentJobMeta.getName().equals(jobMeta.getName())) { + throw new KettleException(BaseMessages.getString(PKG, "JobJobError.Recursive", jobMeta.getFilename())); } - } + + // Also compare with the grand-parent (if there is any) + verifyRecursiveExecution(parentJob.getParentJob(), jobMeta); } - if ( result.getNrErrors() > 0 ) { - result.setResult( false ); - } else { - result.setResult( true ); + public void clear() { + super.clear(); + + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + jobname = null; + filename = null; + directory = null; + arguments = null; + argFromPrevious = false; + addDate = false; + addTime = false; + logfile = null; + logext = null; + setLogfile = false; + setAppendLogfile = false; } - return result; - } - - private boolean createParentFolder( String filename ) { - // Check for parent folder - FileObject parentfolder = null; - boolean resultat = true; - try { - // Get parent folder - parentfolder = KettleVFS.getFileObject( filename, this ).getParent(); - if ( !parentfolder.exists() ) { - if ( createParentFolder ) { - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderNotExist", parentfolder - .getName().toString() ) ); - } - parentfolder.createFolder(); - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderCreated", parentfolder - .getName().toString() ) ); - } + public boolean evaluates() { + return true; + } + + public boolean isUnconditional() { + return true; + } + + public List getSQLStatements(Repository repository, IMetaStore metaStore, VariableSpace space) throws KettleException { + this.copyVariablesFrom(space); + JobMeta jobMeta = getJobMeta(repository, metaStore, space); + return jobMeta.getSQLStatements(repository, null); + } + + @Deprecated + public JobMeta getJobMeta(Repository rep, VariableSpace space) throws KettleException { + if (rep != null) { + return getJobMeta(rep, rep.getMetaStore(), space); } else { - log.logError( BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderNotExist", parentfolder - .getName().toString() ) ); - resultat = false; + return getJobMeta(rep, getMetaStore(), space); } - } else { - if ( log.isDebug() ) { - log.logDebug( 
BaseMessages.getString( PKG, "JobJob.Log.ParentLogFolderExists", parentfolder - .getName().toString() ) ); - } - } - } catch ( Exception e ) { - resultat = false; - log.logError( BaseMessages.getString( PKG, "JobJob.Error.ChekingParentLogFolderTitle" ), BaseMessages - .getString( PKG, "JobJob.Error.ChekingParentLogFolder", parentfolder.getName().toString() ), e ); - } finally { - if ( parentfolder != null ) { + } + + public JobMeta getJobMeta(Repository rep, IMetaStore metaStore, VariableSpace space) throws KettleException { + JobMeta jobMeta = null; try { - parentfolder.close(); - parentfolder = null; - } catch ( Exception ex ) { - // Ignore + CurrentDirectoryResolver r = new CurrentDirectoryResolver(); + VariableSpace tmpSpace = r.resolveCurrentDirectory( + specificationMethod, space, rep, parentJob, getFilename()); + switch (specificationMethod) { + case FILENAME: + String realFilename = tmpSpace.environmentSubstitute(getFilename()); + if (rep != null) { + // need to try to load from the repository + realFilename = r.normalizeSlashes(realFilename); + try { + String dirStr = realFilename.substring(0, realFilename.lastIndexOf("/")); + String tmpFilename = realFilename.substring(realFilename.lastIndexOf("/") + 1); + RepositoryDirectoryInterface dir = rep.findDirectory(dirStr); + jobMeta = rep.loadJob(tmpFilename, dir, null, null); + } catch (KettleException ke) { + // try without extension + if (realFilename.endsWith(Const.STRING_JOB_DEFAULT_EXT)) { + try { + String tmpFilename = realFilename.substring(realFilename.lastIndexOf("/") + 1, + realFilename.indexOf("." + Const.STRING_JOB_DEFAULT_EXT)); + String dirStr = realFilename.substring(0, realFilename.lastIndexOf("/")); + RepositoryDirectoryInterface dir = rep.findDirectory(dirStr); + jobMeta = rep.loadJob(tmpFilename, dir, null, null); + } catch (KettleException ke2) { + // fall back to try loading from file system (mappingJobMeta is going to be null) + } + } + } + } + if (jobMeta == null) { + jobMeta = new JobMeta(tmpSpace, realFilename, rep, metaStore, null); + } + break; + case REPOSITORY_BY_NAME: + String realDirectory = tmpSpace.environmentSubstitute(getDirectory()); + String realJobName = tmpSpace.environmentSubstitute(getJobName()); + + if (rep != null) { + realDirectory = r.normalizeSlashes(realDirectory); + RepositoryDirectoryInterface repositoryDirectory = + rep.loadRepositoryDirectoryTree().findDirectory(realDirectory); + if (repositoryDirectory == null) { + throw new KettleException("Unable to find repository directory [" + + Const.NVL(realDirectory, "") + "]"); + } + jobMeta = rep.loadJob(realJobName, repositoryDirectory, null, null); // reads + } else { + // rep is null, let's try loading by filename + try { + jobMeta = new JobMeta(tmpSpace, realDirectory + "/" + realJobName, rep, metaStore, null); + } catch (KettleException ke) { + try { + // add .kjb extension and try again + jobMeta = new JobMeta(tmpSpace, + realDirectory + "/" + realJobName + "." + Const.STRING_JOB_DEFAULT_EXT, rep, metaStore, null); + } catch (KettleException ke2) { + ke2.printStackTrace(); + throw new KettleException( + "Could not execute job specified in a repository since we're not connected to one"); + } + } + } + break; + case REPOSITORY_BY_REFERENCE: + if (rep != null) { + // Load the last version... 
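+                        // Descriptive note: passing a null revision label to loadJob() below fetches the latest saved version of the referenced job (the "last version" mentioned above).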
+ // + jobMeta = rep.loadJob(jobObjectId, null); + break; + } else { + throw new KettleException( + "Could not execute job specified in a repository since we're not connected to one"); + } + default: + throw new KettleException("The specified object location specification method '" + + specificationMethod + "' is not yet supported in this job entry."); + } + + if (jobMeta != null) { + jobMeta.setRepository(rep); + jobMeta.setMetaStore(metaStore); + } + + return jobMeta; + } catch (Exception e) { + throw new KettleException("Unexpected error during job metadata load", e); } - } + } - return resultat; - } - - /** - * Make sure that we are not loading jobs recursively... - * - * @param parentJobMeta - * the parent job metadata - * @param jobMeta - * the job metadata - * @throws KettleException - * in case both jobs are loaded from the same source - */ - private void verifyRecursiveExecution( Job parentJob, JobMeta jobMeta ) throws KettleException { - - if ( parentJob == null ) { - return; // OK! + /** + * @return Returns the runEveryResultRow. + */ + public boolean isExecPerRow() { + return execPerRow; } - JobMeta parentJobMeta = parentJob.getJobMeta(); + /** + * @param runEveryResultRow The runEveryResultRow to set. + */ + public void setExecPerRow(boolean runEveryResultRow) { + this.execPerRow = runEveryResultRow; + } - if ( parentJobMeta.getName() == null && jobMeta.getName() != null ) { - return; // OK + public List getResourceDependencies(JobMeta jobMeta) { + List references = super.getResourceDependencies(jobMeta); + if (!Const.isEmpty(filename)) { + String realFileName = jobMeta.environmentSubstitute(filename); + ResourceReference reference = new ResourceReference(this); + reference.getEntries().add(new ResourceEntry(realFileName, ResourceType.ACTIONFILE)); + references.add(reference); + } + return references; } - if ( parentJobMeta.getName() != null && jobMeta.getName() == null ) { - return; // OK as well. + + /** + * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied + * resource naming interface allows the object to name appropriately without worrying about those parts of the + * implementation specific details. + * + * @param space The variable space to resolve (environment) variables with. + * @param definitions The map containing the filenames and content + * @param namingInterface The resource naming interface allows the object to be named appropriately + * @param repository The repository to load resources from + * @param metaStore the metaStore to load external metadata from + * @return The filename for this object. (also contained in the definitions map) + * @throws KettleException in case something goes wrong during the export + */ + public String exportResources(VariableSpace space, Map definitions, + ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore) throws KettleException { + // Try to load the transformation from repository or file. + // Modify this recursively too... + // + // AGAIN: there is no need to clone this job entry because the caller is + // responsible for this. + // + // First load the job meta data... + // + copyVariablesFrom(space); // To make sure variables are available. + JobMeta jobMeta = getJobMeta(repository, metaStore, space); + + // Also go down into the job and export the files there. 
(going down + // recursively) + // + String proposedNewFilename = + jobMeta.exportResources(jobMeta, definitions, namingInterface, repository, metaStore); + + // To get a relative path to it, we inject + // ${Internal.Job.Filename.Directory} + // + String newFilename = "${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}/" + proposedNewFilename; + + // Set the filename in the job + // + jobMeta.setFilename(newFilename); + + // exports always reside in the root directory, in case we want to turn this + // into a file repository... + // + jobMeta.setRepositoryDirectory(new RepositoryDirectory()); + + // export to filename ALWAYS (this allows the exported XML to be executed remotely) + // + setSpecificationMethod(ObjectLocationSpecificationMethod.FILENAME); + + // change it in the job entry + // + filename = newFilename; + + return proposedNewFilename; } - // Not from the repository? just verify the filename - // - if ( jobMeta.getFilename() != null && jobMeta.getFilename().equals( parentJobMeta.getFilename() ) ) { - throw new KettleException( BaseMessages.getString( PKG, "JobJobError.Recursive", jobMeta.getFilename() ) ); + @Override + public void check(List remarks, JobMeta jobMeta, VariableSpace space, + Repository repository, IMetaStore metaStore) { + if (setLogfile) { + JobEntryValidatorUtils.andValidator().validate(this, "logfile", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notBlankValidator())); + } + + if (null != directory) { + // if from repo + JobEntryValidatorUtils.andValidator().validate(this, "directory", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notNullValidator())); + JobEntryValidatorUtils.andValidator().validate(this, "jobName", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notBlankValidator())); + } else { + // else from xml file + JobEntryValidatorUtils.andValidator().validate(this, "filename", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notBlankValidator())); + } } - // Different directories: OK - if ( parentJobMeta.getRepositoryDirectory() == null && jobMeta.getRepositoryDirectory() != null ) { - return; + public static void main(String[] args) { + List remarks = new ArrayList(); + new JobEntryJob().check(remarks, null, new Variables(), null, null); + System.out.printf("Remarks: %s\n", remarks); } - if ( parentJobMeta.getRepositoryDirectory() != null && jobMeta.getRepositoryDirectory() == null ) { - return; + + protected String getLogfile() { + return logfile; } - if ( jobMeta.getRepositoryDirectory().getObjectId() != parentJobMeta.getRepositoryDirectory().getObjectId() ) { - return; + + /** + * @return the remote slave server name + */ + public String getRemoteSlaveServerName() { + return remoteSlaveServerName; } - // Same names, same directories : loaded from same location in the - // repository: - // --> recursive loading taking place! 
- // - if ( parentJobMeta.getName().equals( jobMeta.getName() ) ) { - throw new KettleException( BaseMessages.getString( PKG, "JobJobError.Recursive", jobMeta.getFilename() ) ); + /** + * @param remoteSlaveServerName the remoteSlaveServer to set + */ + public void setRemoteSlaveServerName(String remoteSlaveServerName) { + this.remoteSlaveServerName = remoteSlaveServerName; } - // Also compare with the grand-parent (if there is any) - verifyRecursiveExecution( parentJob.getParentJob(), jobMeta ); - } - - public void clear() { - super.clear(); - - specificationMethod = ObjectLocationSpecificationMethod.FILENAME; - jobname = null; - filename = null; - directory = null; - arguments = null; - argFromPrevious = false; - addDate = false; - addTime = false; - logfile = null; - logext = null; - setLogfile = false; - setAppendLogfile = false; - } - - public boolean evaluates() { - return true; - } - - public boolean isUnconditional() { - return true; - } - - public List getSQLStatements( Repository repository, IMetaStore metaStore, VariableSpace space ) throws KettleException { - this.copyVariablesFrom( space ); - JobMeta jobMeta = getJobMeta( repository, metaStore, space ); - return jobMeta.getSQLStatements( repository, null ); - } - - @Deprecated - public JobMeta getJobMeta( Repository rep, VariableSpace space ) throws KettleException { - if ( rep != null ) { - return getJobMeta( rep, rep.getMetaStore(), space ); - } else { - return getJobMeta( rep, getMetaStore(), space ); + /** + * @return the waitingToFinish + */ + public boolean isWaitingToFinish() { + return waitingToFinish; } - } - - public JobMeta getJobMeta( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { - JobMeta jobMeta = null; - try { - CurrentDirectoryResolver r = new CurrentDirectoryResolver(); - VariableSpace tmpSpace = r.resolveCurrentDirectory( - specificationMethod, space, rep, parentJob, getFilename() ); - switch ( specificationMethod ) { - case FILENAME: - String realFilename = tmpSpace.environmentSubstitute( getFilename() ); - if ( rep != null ) { - // need to try to load from the repository - realFilename = r.normalizeSlashes( realFilename ); - try { - String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); - String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1 ); - RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); - jobMeta = rep.loadJob( tmpFilename, dir, null, null ); - } catch ( KettleException ke ) { - // try without extension - if ( realFilename.endsWith( Const.STRING_JOB_DEFAULT_EXT ) ) { - try { - String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1, - realFilename.indexOf( "." 
+ Const.STRING_JOB_DEFAULT_EXT ) ); - String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); - RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); - jobMeta = rep.loadJob( tmpFilename, dir, null, null ); - } catch ( KettleException ke2 ) { - // fall back to try loading from file system (mappingJobMeta is going to be null) - } - } - } - } - if ( jobMeta == null ) { - jobMeta = new JobMeta( tmpSpace, realFilename, rep, metaStore, null ); - } - break; - case REPOSITORY_BY_NAME: - String realDirectory = tmpSpace.environmentSubstitute( getDirectory() ); - String realJobName = tmpSpace.environmentSubstitute( getJobName() ); - - if ( rep != null ) { - realDirectory = r.normalizeSlashes( realDirectory ); - RepositoryDirectoryInterface repositoryDirectory = - rep.loadRepositoryDirectoryTree().findDirectory( realDirectory ); - if ( repositoryDirectory == null ) { - throw new KettleException( "Unable to find repository directory [" - + Const.NVL( realDirectory, "" ) + "]" ); - } - jobMeta = rep.loadJob( realJobName, repositoryDirectory, null, null ); // reads - } else { - // rep is null, let's try loading by filename - try { - jobMeta = new JobMeta( tmpSpace, realDirectory + "/" + realJobName, rep, metaStore, null ); - } catch ( KettleException ke ) { - try { - // add .kjb extension and try again - jobMeta = new JobMeta( tmpSpace, - realDirectory + "/" + realJobName + "." + Const.STRING_JOB_DEFAULT_EXT, rep, metaStore, null ); - } catch ( KettleException ke2 ) { - ke2.printStackTrace(); - throw new KettleException( - "Could not execute job specified in a repository since we're not connected to one" ); - } - } - } - break; - case REPOSITORY_BY_REFERENCE: - if ( rep != null ) { - // Load the last version... - // - jobMeta = rep.loadJob( jobObjectId, null ); - break; - } else { - throw new KettleException( - "Could not execute job specified in a repository since we're not connected to one" ); - } - default: - throw new KettleException( "The specified object location specification method '" - + specificationMethod + "' is not yet supported in this job entry." ); - } - - if ( jobMeta != null ) { - jobMeta.setRepository( rep ); - jobMeta.setMetaStore( metaStore ); - } - - return jobMeta; - } catch ( Exception e ) { - throw new KettleException( "Unexpected error during job metadata load", e ); + + /** + * @param waitingToFinish the waitingToFinish to set + */ + public void setWaitingToFinish(boolean waitingToFinish) { + this.waitingToFinish = waitingToFinish; } - } - - /** - * @return Returns the runEveryResultRow. - */ - public boolean isExecPerRow() { - return execPerRow; - } - - /** - * @param runEveryResultRow - * The runEveryResultRow to set. - */ - public void setExecPerRow( boolean runEveryResultRow ) { - this.execPerRow = runEveryResultRow; - } - - public List getResourceDependencies( JobMeta jobMeta ) { - List references = super.getResourceDependencies( jobMeta ); - if ( !Const.isEmpty( filename ) ) { - String realFileName = jobMeta.environmentSubstitute( filename ); - ResourceReference reference = new ResourceReference( this ); - reference.getEntries().add( new ResourceEntry( realFileName, ResourceType.ACTIONFILE ) ); - references.add( reference ); + /** + * @return the followingAbortRemotely + */ + public boolean isFollowingAbortRemotely() { + return followingAbortRemotely; } - return references; - } - - /** - * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. 
The supplied - * resource naming interface allows the object to name appropriately without worrying about those parts of the - * implementation specific details. - * - * @param space - * The variable space to resolve (environment) variables with. - * @param definitions - * The map containing the filenames and content - * @param namingInterface - * The resource naming interface allows the object to be named appropriately - * @param repository - * The repository to load resources from - * @param metaStore - * the metaStore to load external metadata from - * - * @return The filename for this object. (also contained in the definitions map) - * @throws KettleException - * in case something goes wrong during the export - */ - public String exportResources( VariableSpace space, Map definitions, - ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { - // Try to load the transformation from repository or file. - // Modify this recursively too... - // - // AGAIN: there is no need to clone this job entry because the caller is - // responsible for this. - // - // First load the job meta data... - // - copyVariablesFrom( space ); // To make sure variables are available. - JobMeta jobMeta = getJobMeta( repository, metaStore, space ); - // Also go down into the job and export the files there. (going down - // recursively) - // - String proposedNewFilename = - jobMeta.exportResources( jobMeta, definitions, namingInterface, repository, metaStore ); + /** + * @param followingAbortRemotely the followingAbortRemotely to set + */ + public void setFollowingAbortRemotely(boolean followingAbortRemotely) { + this.followingAbortRemotely = followingAbortRemotely; + } - // To get a relative path to it, we inject - // ${Internal.Job.Filename.Directory} - // - String newFilename = "${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}/" + proposedNewFilename; + /** + * @return the passingAllParameters + */ + public boolean isPassingAllParameters() { + return passingAllParameters; + } - // Set the filename in the job - // - jobMeta.setFilename( newFilename ); + /** + * @param passingAllParameters the passingAllParameters to set + */ + public void setPassingAllParameters(boolean passingAllParameters) { + this.passingAllParameters = passingAllParameters; + } - // exports always reside in the root directory, in case we want to turn this - // into a file repository... 
- // - jobMeta.setRepositoryDirectory( new RepositoryDirectory() ); + public Job getJob() { + return job; + } - // export to filename ALWAYS (this allows the exported XML to be executed remotely) - // - setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); + /** + * @return the jobObjectId + */ + public ObjectId getJobObjectId() { + return jobObjectId; + } - // change it in the job entry - // - filename = newFilename; + /** + * @param jobObjectId the jobObjectId to set + */ + public void setJobObjectId(ObjectId jobObjectId) { + this.jobObjectId = jobObjectId; + } - return proposedNewFilename; - } + /** + * @return the specificationMethod + */ + public ObjectLocationSpecificationMethod getSpecificationMethod() { + return specificationMethod; + } - @Override - public void check( List remarks, JobMeta jobMeta, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - if ( setLogfile ) { - JobEntryValidatorUtils.andValidator().validate( this, "logfile", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + /** + * @param specificationMethod the specificationMethod to set + */ + public void setSpecificationMethod(ObjectLocationSpecificationMethod specificationMethod) { + this.specificationMethod = specificationMethod; } - if ( null != directory ) { - // if from repo - JobEntryValidatorUtils.andValidator().validate( this, "directory", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notNullValidator() ) ); - JobEntryValidatorUtils.andValidator().validate( this, "jobName", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); - } else { - // else from xml file - JobEntryValidatorUtils.andValidator().validate( this, "filename", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + @Override + public boolean hasRepositoryReferences() { + return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } + + /** + * Look up the references after import + * + * @param repository the repository to reference. + */ + public void lookupRepositoryReferences(Repository repository) throws KettleException { + // The correct reference is stored in the job name and directory attributes... + // + RepositoryDirectoryInterface repositoryDirectoryInterface = + RepositoryImportLocation.getRepositoryImportLocation().findDirectory(directory); + jobObjectId = repository.getJobId(jobname, repositoryDirectoryInterface); + } + + private boolean isJobDefined() { + return !Const.isEmpty(filename) + || jobObjectId != null || (!Const.isEmpty(this.directory) && !Const.isEmpty(jobname)); + } + + public boolean[] isReferencedObjectEnabled() { + return new boolean[]{isJobDefined(),}; + } + + /** + * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... 
+ */ + public String[] getReferencedObjectDescriptions() { + return new String[]{BaseMessages.getString(PKG, "JobEntryJob.ReferencedObject.Description"),}; + } + + /** + * Load the referenced object + * + * @param index the referenced object index to load (in case there are multiple references) + * @param rep the repository + * @param metaStore the metaStore + * @param space the variable space to use + * @return the referenced object once loaded + * @throws KettleException + */ + public Object loadReferencedObject(int index, Repository rep, IMetaStore metaStore, VariableSpace space) throws KettleException { + return getJobMeta(rep, metaStore, space); + } + + public boolean isExpandingRemoteJob() { + return expandingRemoteJob; + } + + public void setExpandingRemoteJob(boolean expandingRemoteJob) { + this.expandingRemoteJob = expandingRemoteJob; } - } - - public static void main( String[] args ) { - List remarks = new ArrayList(); - new JobEntryJob().check( remarks, null, new Variables(), null, null ); - System.out.printf( "Remarks: %s\n", remarks ); - } - - protected String getLogfile() { - return logfile; - } - - /** - * @return the remote slave server name - */ - public String getRemoteSlaveServerName() { - return remoteSlaveServerName; - } - - /** - * @param remoteSlaveServerName - * the remoteSlaveServer to set - */ - public void setRemoteSlaveServerName( String remoteSlaveServerName ) { - this.remoteSlaveServerName = remoteSlaveServerName; - } - - /** - * @return the waitingToFinish - */ - public boolean isWaitingToFinish() { - return waitingToFinish; - } - - /** - * @param waitingToFinish - * the waitingToFinish to set - */ - public void setWaitingToFinish( boolean waitingToFinish ) { - this.waitingToFinish = waitingToFinish; - } - - /** - * @return the followingAbortRemotely - */ - public boolean isFollowingAbortRemotely() { - return followingAbortRemotely; - } - - /** - * @param followingAbortRemotely - * the followingAbortRemotely to set - */ - public void setFollowingAbortRemotely( boolean followingAbortRemotely ) { - this.followingAbortRemotely = followingAbortRemotely; - } - - /** - * @return the passingAllParameters - */ - public boolean isPassingAllParameters() { - return passingAllParameters; - } - - /** - * @param passingAllParameters - * the passingAllParameters to set - */ - public void setPassingAllParameters( boolean passingAllParameters ) { - this.passingAllParameters = passingAllParameters; - } - - public Job getJob() { - return job; - } - - /** - * @return the jobObjectId - */ - public ObjectId getJobObjectId() { - return jobObjectId; - } - - /** - * @param jobObjectId - * the jobObjectId to set - */ - public void setJobObjectId( ObjectId jobObjectId ) { - this.jobObjectId = jobObjectId; - } - - /** - * @return the specificationMethod - */ - public ObjectLocationSpecificationMethod getSpecificationMethod() { - return specificationMethod; - } - - /** - * @param specificationMethod - * the specificationMethod to set - */ - public void setSpecificationMethod( ObjectLocationSpecificationMethod specificationMethod ) { - this.specificationMethod = specificationMethod; - } - - @Override - public boolean hasRepositoryReferences() { - return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; - } - - /** - * Look up the references after import - * - * @param repository - * the repository to reference. 
- */ - public void lookupRepositoryReferences( Repository repository ) throws KettleException { - // The correct reference is stored in the job name and directory attributes... - // - RepositoryDirectoryInterface repositoryDirectoryInterface = - RepositoryImportLocation.getRepositoryImportLocation().findDirectory( directory ); - jobObjectId = repository.getJobId( jobname, repositoryDirectoryInterface ); - } - - private boolean isJobDefined() { - return !Const.isEmpty( filename ) - || jobObjectId != null || ( !Const.isEmpty( this.directory ) && !Const.isEmpty( jobname ) ); - } - - public boolean[] isReferencedObjectEnabled() { - return new boolean[] { isJobDefined(), }; - } - - /** - * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... - */ - public String[] getReferencedObjectDescriptions() { - return new String[] { BaseMessages.getString( PKG, "JobEntryJob.ReferencedObject.Description" ), }; - } - - /** - * Load the referenced object - * - * @param index - * the referenced object index to load (in case there are multiple references) - * @param rep - * the repository - * @param metaStore - * the metaStore - * @param space - * the variable space to use - * @return the referenced object once loaded - * @throws KettleException - */ - public Object loadReferencedObject( int index, Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { - return getJobMeta( rep, metaStore, space ); - } - - public boolean isExpandingRemoteJob() { - return expandingRemoteJob; - } - - public void setExpandingRemoteJob( boolean expandingRemoteJob ) { - this.expandingRemoteJob = expandingRemoteJob; - } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java index ac26b6f..83324a8 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/trans/JobEntryTrans.java @@ -22,20 +22,8 @@ package org.pentaho.di.job.entries.trans; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.Map; - import org.pentaho.di.cluster.SlaveServer; -import org.pentaho.di.core.CheckResultInterface; -import org.pentaho.di.core.Const; -import org.pentaho.di.core.ObjectLocationSpecificationMethod; -import org.pentaho.di.core.Result; -import org.pentaho.di.core.ResultFile; -import org.pentaho.di.core.RowMetaAndData; -import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.*; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleDatabaseException; import org.pentaho.di.core.exception.KettleException; @@ -57,14 +45,7 @@ import org.pentaho.di.job.entry.JobEntryInterface; import org.pentaho.di.job.entry.validator.AndValidator; import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils; -import org.pentaho.di.repository.ObjectId; -import org.pentaho.di.repository.Repository; -import org.pentaho.di.repository.RepositoryDirectory; -import org.pentaho.di.repository.RepositoryDirectoryInterface; -import org.pentaho.di.repository.RepositoryImportLocation; -import org.pentaho.di.repository.RepositoryObject; -import org.pentaho.di.repository.RepositoryObjectType; -import org.pentaho.di.repository.StringObjectId; +import org.pentaho.di.repository.*; import org.pentaho.di.resource.ResourceDefinition; import 
org.pentaho.di.resource.ResourceEntry; import org.pentaho.di.resource.ResourceEntry.ResourceType; @@ -79,6 +60,12 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.List; +import java.util.Map; + /** * This is the job entry that defines a transformation to be run. * @@ -86,1478 +73,1478 @@ * @since 1-Oct-2003, rewritten on 18-June-2004 */ public class JobEntryTrans extends JobEntryBase implements Cloneable, JobEntryInterface { - private static Class PKG = JobEntryTrans.class; // for i18n purposes, needed by Translator2!! + private static Class PKG = JobEntryTrans.class; // for i18n purposes, needed by Translator2!! - private String transname; + private String transname; - private String filename; + private String filename; - private String directory; + private String directory; - private ObjectId transObjectId; + private ObjectId transObjectId; - private ObjectLocationSpecificationMethod specificationMethod; + private ObjectLocationSpecificationMethod specificationMethod; - public String[] arguments; + public String[] arguments; - public boolean argFromPrevious; + public boolean argFromPrevious; - public boolean paramsFromPrevious; + public boolean paramsFromPrevious; - public boolean execPerRow; + public boolean execPerRow; - public String[] parameters; + public String[] parameters; - public String[] parameterFieldNames; + public String[] parameterFieldNames; - public String[] parameterValues; + public String[] parameterValues; - public boolean clearResultRows; + public boolean clearResultRows; - public boolean clearResultFiles; + public boolean clearResultFiles; - public boolean createParentFolder; + public boolean createParentFolder; - public boolean setLogfile; + public boolean setLogfile; - public boolean setAppendLogfile; + public boolean setAppendLogfile; - public String logfile, logext; + public String logfile, logext; - public boolean addDate, addTime; + public boolean addDate, addTime; - public LogLevel logFileLevel; + public LogLevel logFileLevel; - private String directoryPath; + private String directoryPath; - private boolean clustering; + private boolean clustering; - public boolean waitingToFinish = true; + public boolean waitingToFinish = true; - public boolean followingAbortRemotely; + public boolean followingAbortRemotely; - private String remoteSlaveServerName; + private String remoteSlaveServerName; - private boolean passingAllParameters = true; + private boolean passingAllParameters = true; - private boolean loggingRemoteWork; + private boolean loggingRemoteWork; - private Trans trans; + private Trans trans; - public JobEntryTrans( String name ) { - super( name, "" ); - } + public JobEntryTrans(String name) { + super(name, ""); + } - public JobEntryTrans() { - this( "" ); - clear(); - } + public JobEntryTrans() { + this(""); + clear(); + } - private void allocateArgs( int nrArgs ) { - arguments = new String[nrArgs]; - } + private void allocateArgs(int nrArgs) { + arguments = new String[nrArgs]; + } - private void allocateParams( int nrParameters ) { - parameters = new String[nrParameters]; - parameterFieldNames = new String[nrParameters]; - parameterValues = new String[nrParameters]; - } + private void allocateParams(int nrParameters) { + parameters = new String[nrParameters]; + parameterFieldNames = new String[nrParameters]; + parameterValues = new String[nrParameters]; + } - public Object clone() { - JobEntryTrans je = (JobEntryTrans) 
super.clone(); - if ( arguments != null ) { - int nrArgs = arguments.length; - je.allocateArgs( nrArgs ); - System.arraycopy( arguments, 0, je.arguments, 0, nrArgs ); + public Object clone() { + JobEntryTrans je = (JobEntryTrans) super.clone(); + if (arguments != null) { + int nrArgs = arguments.length; + je.allocateArgs(nrArgs); + System.arraycopy(arguments, 0, je.arguments, 0, nrArgs); + } + if (parameters != null) { + int nrParameters = parameters.length; + je.allocateParams(nrParameters); + System.arraycopy(parameters, 0, je.parameters, 0, nrParameters); + System.arraycopy(parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters); + System.arraycopy(parameterValues, 0, je.parameterValues, 0, nrParameters); + } + return je; } - if ( parameters != null ) { - int nrParameters = parameters.length; - je.allocateParams( nrParameters ); - System.arraycopy( parameters, 0, je.parameters, 0, nrParameters ); - System.arraycopy( parameterFieldNames, 0, je.parameterFieldNames, 0, nrParameters ); - System.arraycopy( parameterValues, 0, je.parameterValues, 0, nrParameters ); + + public void setFileName(String n) { + filename = n; } - return je; - } - - public void setFileName( String n ) { - filename = n; - } - - /** - * @return the filename - * @deprecated use getFilename() instead - */ - @Deprecated - public String getFileName() { - return filename; - } - - public String getFilename() { - return filename; - } - - public String getRealFilename() { - return environmentSubstitute( getFilename() ); - } - - public void setTransname( String transname ) { - this.transname = transname; - } - - public String getTransname() { - return transname; - } - - public String getDirectory() { - return directory; - } - - public void setDirectory( String directory ) { - this.directory = directory; - } - - public String getLogFilename() { - String retval = ""; - if ( setLogfile ) { - retval += logfile == null ? "" : logfile; - Calendar cal = Calendar.getInstance(); - if ( addDate ) { - SimpleDateFormat sdf = new SimpleDateFormat( "yyyyMMdd" ); - retval += "_" + sdf.format( cal.getTime() ); - } - if ( addTime ) { - SimpleDateFormat sdf = new SimpleDateFormat( "HHmmss" ); - retval += "_" + sdf.format( cal.getTime() ); - } - if ( logext != null && logext.length() > 0 ) { - retval += "." + logext; - } + + /** + * @return the filename + * @deprecated use getFilename() instead + */ + @Deprecated + public String getFileName() { + return filename; } - return retval; - } - public String getXML() { - StringBuffer retval = new StringBuffer( 300 ); + public String getFilename() { + return filename; + } - retval.append( super.getXML() ); + public String getRealFilename() { + return environmentSubstitute(getFilename()); + } - // specificationMethod - // - retval.append( " " ).append( - XMLHandler.addTagValue( "specification_method", specificationMethod == null ? null : specificationMethod - .getCode() ) - ); - retval.append( " " ).append( - XMLHandler.addTagValue( "trans_object_id", transObjectId == null ? null : transObjectId.toString() ) ); - // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same - // repository. 
- // - if ( rep != null && transObjectId != null ) { - try { - RepositoryObject objectInformation = - rep.getObjectInformation( transObjectId, RepositoryObjectType.TRANSFORMATION ); - if ( objectInformation != null ) { - transname = objectInformation.getName(); - directory = objectInformation.getRepositoryDirectory().getPath(); - } - } catch ( KettleException e ) { - // Ignore object reference problems. It simply means that the reference is no longer valid. - } + public void setTransname(String transname) { + this.transname = transname; } - retval.append( " " ).append( XMLHandler.addTagValue( "filename", filename ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "transname", transname ) ); - - if ( directory != null ) { - retval.append( " " ).append( XMLHandler.addTagValue( "directory", directory ) ); - } else if ( directoryPath != null ) { - // don't loose this info (backup/recovery) - // - retval.append( " " ).append( XMLHandler.addTagValue( "directory", directoryPath ) ); + + public String getTransname() { + return transname; } - retval.append( " " ).append( XMLHandler.addTagValue( "arg_from_previous", argFromPrevious ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "params_from_previous", paramsFromPrevious ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "exec_per_row", execPerRow ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "clear_rows", clearResultRows ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "clear_files", clearResultFiles ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "set_logfile", setLogfile ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "logfile", logfile ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "logext", logext ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "add_date", addDate ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "add_time", addTime ) ); - retval.append( " " ).append( - XMLHandler.addTagValue( "loglevel", logFileLevel != null ? logFileLevel.getCode() : null ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "cluster", clustering ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "slave_server_name", remoteSlaveServerName ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "set_append_logfile", setAppendLogfile ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "wait_until_finished", waitingToFinish ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "follow_abort_remote", followingAbortRemotely ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "create_parent_folder", createParentFolder ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "logging_remote_work", loggingRemoteWork ) ); - - if ( arguments != null ) { - for ( int i = 0; i < arguments.length; i++ ) { - // This is a very very bad way of making an XML file, don't use it (or - // copy it). Sven Boden - retval.append( " " ).append( XMLHandler.addTagValue( "argument" + i, arguments[ i ] ) ); - } + + public String getDirectory() { + return directory; } - if ( parameters != null ) { - retval.append( " " ).append( XMLHandler.openTag( "parameters" ) ); + public void setDirectory(String directory) { + this.directory = directory; + } - retval.append( " " ).append( XMLHandler.addTagValue( "pass_all_parameters", passingAllParameters ) ); + public String getLogFilename() { + String retval = ""; + if (setLogfile) { + retval += logfile == null ? 
"" : logfile; + Calendar cal = Calendar.getInstance(); + if (addDate) { + SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd"); + retval += "_" + sdf.format(cal.getTime()); + } + if (addTime) { + SimpleDateFormat sdf = new SimpleDateFormat("HHmmss"); + retval += "_" + sdf.format(cal.getTime()); + } + if (logext != null && logext.length() > 0) { + retval += "." + logext; + } + } + return retval; + } - for ( int i = 0; i < parameters.length; i++ ) { - // This is a better way of making the XML file than the arguments. - retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ); + public String getXML() { + StringBuffer retval = new StringBuffer(300); - retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[ i ] ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "stream_name", parameterFieldNames[ i ] ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "value", parameterValues[ i ] ) ); + retval.append(super.getXML()); - retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ); - } - retval.append( " " ).append( XMLHandler.closeTag( "parameters" ) ); - } + // specificationMethod + // + retval.append(" ").append( + XMLHandler.addTagValue("specification_method", specificationMethod == null ? null : specificationMethod + .getCode()) + ); + retval.append(" ").append( + XMLHandler.addTagValue("trans_object_id", transObjectId == null ? null : transObjectId.toString())); + // Export a little bit of extra information regarding the reference since it doesn't really matter outside the same + // repository. + // + if (rep != null && transObjectId != null) { + try { + RepositoryObject objectInformation = + rep.getObjectInformation(transObjectId, RepositoryObjectType.TRANSFORMATION); + if (objectInformation != null) { + transname = objectInformation.getName(); + directory = objectInformation.getRepositoryDirectory().getPath(); + } + } catch (KettleException e) { + // Ignore object reference problems. It simply means that the reference is no longer valid. + } + } + retval.append(" ").append(XMLHandler.addTagValue("filename", filename)); + retval.append(" ").append(XMLHandler.addTagValue("transname", transname)); + + if (directory != null) { + retval.append(" ").append(XMLHandler.addTagValue("directory", directory)); + } else if (directoryPath != null) { + // don't loose this info (backup/recovery) + // + retval.append(" ").append(XMLHandler.addTagValue("directory", directoryPath)); + } + retval.append(" ").append(XMLHandler.addTagValue("arg_from_previous", argFromPrevious)); + retval.append(" ").append(XMLHandler.addTagValue("params_from_previous", paramsFromPrevious)); + retval.append(" ").append(XMLHandler.addTagValue("exec_per_row", execPerRow)); + retval.append(" ").append(XMLHandler.addTagValue("clear_rows", clearResultRows)); + retval.append(" ").append(XMLHandler.addTagValue("clear_files", clearResultFiles)); + retval.append(" ").append(XMLHandler.addTagValue("set_logfile", setLogfile)); + retval.append(" ").append(XMLHandler.addTagValue("logfile", logfile)); + retval.append(" ").append(XMLHandler.addTagValue("logext", logext)); + retval.append(" ").append(XMLHandler.addTagValue("add_date", addDate)); + retval.append(" ").append(XMLHandler.addTagValue("add_time", addTime)); + retval.append(" ").append( + XMLHandler.addTagValue("loglevel", logFileLevel != null ? 
logFileLevel.getCode() : null)); + retval.append(" ").append(XMLHandler.addTagValue("cluster", clustering)); + retval.append(" ").append(XMLHandler.addTagValue("slave_server_name", remoteSlaveServerName)); + retval.append(" ").append(XMLHandler.addTagValue("set_append_logfile", setAppendLogfile)); + retval.append(" ").append(XMLHandler.addTagValue("wait_until_finished", waitingToFinish)); + retval.append(" ").append(XMLHandler.addTagValue("follow_abort_remote", followingAbortRemotely)); + retval.append(" ").append(XMLHandler.addTagValue("create_parent_folder", createParentFolder)); + retval.append(" ").append(XMLHandler.addTagValue("logging_remote_work", loggingRemoteWork)); + + if (arguments != null) { + for (int i = 0; i < arguments.length; i++) { + // This is a very very bad way of making an XML file, don't use it (or + // copy it). Sven Boden + retval.append(" ").append(XMLHandler.addTagValue("argument" + i, arguments[i])); + } + } - return retval.toString(); - } + if (parameters != null) { + retval.append(" ").append(XMLHandler.openTag("parameters")); - private void checkObjectLocationSpecificationMethod() { - if ( specificationMethod == null ) { - // Backward compatibility - // - // Default = Filename - // - specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + retval.append(" ").append(XMLHandler.addTagValue("pass_all_parameters", passingAllParameters)); - if ( !Const.isEmpty( filename ) ) { - specificationMethod = ObjectLocationSpecificationMethod.FILENAME; - } else if ( transObjectId != null ) { - specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; - } else if ( !Const.isEmpty( transname ) ) { - specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; - } + for (int i = 0; i < parameters.length; i++) { + // This is a better way of making the XML file than the arguments. + retval.append(" ").append(XMLHandler.openTag("parameter")); + + retval.append(" ").append(XMLHandler.addTagValue("name", parameters[i])); + retval.append(" ").append(XMLHandler.addTagValue("stream_name", parameterFieldNames[i])); + retval.append(" ").append(XMLHandler.addTagValue("value", parameterValues[i])); + + retval.append(" ").append(XMLHandler.closeTag("parameter")); + } + retval.append(" ").append(XMLHandler.closeTag("parameters")); + } + + return retval.toString(); } - } - - public void loadXML( Node entrynode, List databases, List slaveServers, - Repository rep, IMetaStore metaStore ) throws KettleXMLException { - try { - super.loadXML( entrynode, databases, slaveServers ); - - String method = XMLHandler.getTagValue( entrynode, "specification_method" ); - specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); - - String transId = XMLHandler.getTagValue( entrynode, "trans_object_id" ); - transObjectId = Const.isEmpty( transId ) ? 
null : new StringObjectId( transId ); - filename = XMLHandler.getTagValue( entrynode, "filename" ); - transname = XMLHandler.getTagValue( entrynode, "transname" ); - directory = XMLHandler.getTagValue( entrynode, "directory" ); - - if ( rep != null && rep.isConnected() && !Const.isEmpty( transname ) ) { - specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; - } - - // Backward compatibility check for object specification - // - checkObjectLocationSpecificationMethod(); - - argFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "arg_from_previous" ) ); - paramsFromPrevious = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "params_from_previous" ) ); - execPerRow = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "exec_per_row" ) ); - clearResultRows = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "clear_rows" ) ); - clearResultFiles = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "clear_files" ) ); - setLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_logfile" ) ); - addDate = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_date" ) ); - addTime = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "add_time" ) ); - logfile = XMLHandler.getTagValue( entrynode, "logfile" ); - logext = XMLHandler.getTagValue( entrynode, "logext" ); - logFileLevel = LogLevel.getLogLevelForCode( XMLHandler.getTagValue( entrynode, "loglevel" ) ); - clustering = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "cluster" ) ); - createParentFolder = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "create_parent_folder" ) ); - loggingRemoteWork = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "logging_remote_work" ) ); - - remoteSlaveServerName = XMLHandler.getTagValue( entrynode, "slave_server_name" ); - - setAppendLogfile = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "set_append_logfile" ) ); - String wait = XMLHandler.getTagValue( entrynode, "wait_until_finished" ); - if ( Const.isEmpty( wait ) ) { - waitingToFinish = true; - } else { - waitingToFinish = "Y".equalsIgnoreCase( wait ); - } - followingAbortRemotely = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "follow_abort_remote" ) ); + private void checkObjectLocationSpecificationMethod() { + if (specificationMethod == null) { + // Backward compatibility + // + // Default = Filename + // + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + + if (!Const.isEmpty(filename)) { + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + } else if (transObjectId != null) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } else if (!Const.isEmpty(transname)) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } + } + } - // How many arguments? - int argnr = 0; - while ( XMLHandler.getTagValue( entrynode, "argument" + argnr ) != null ) { - argnr++; - } - allocateArgs( argnr ); + public void loadXML(Node entrynode, List databases, List slaveServers, + Repository rep, IMetaStore metaStore) throws KettleXMLException { + try { + super.loadXML(entrynode, databases, slaveServers); - // Read them all... 
- for ( int a = 0; a < argnr; a++ ) { - arguments[ a ] = XMLHandler.getTagValue( entrynode, "argument" + a ); - } + String method = XMLHandler.getTagValue(entrynode, "specification_method"); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode(method); - Node parametersNode = XMLHandler.getSubNode( entrynode, "parameters" ); + String transId = XMLHandler.getTagValue(entrynode, "trans_object_id"); + transObjectId = Const.isEmpty(transId) ? null : new StringObjectId(transId); + filename = XMLHandler.getTagValue(entrynode, "filename"); + transname = XMLHandler.getTagValue(entrynode, "transname"); + directory = XMLHandler.getTagValue(entrynode, "directory"); - String passAll = XMLHandler.getTagValue( parametersNode, "pass_all_parameters" ); - passingAllParameters = Const.isEmpty( passAll ) || "Y".equalsIgnoreCase( passAll ); + if (rep != null && rep.isConnected() && !Const.isEmpty(transname)) { + specificationMethod = ObjectLocationSpecificationMethod.REPOSITORY_BY_NAME; + } - int nrParameters = XMLHandler.countNodes( parametersNode, "parameter" ); - allocateParams( nrParameters ); + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "arg_from_previous")); + paramsFromPrevious = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "params_from_previous")); + execPerRow = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "exec_per_row")); + clearResultRows = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "clear_rows")); + clearResultFiles = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "clear_files")); + setLogfile = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "set_logfile")); + addDate = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "add_date")); + addTime = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "add_time")); + logfile = XMLHandler.getTagValue(entrynode, "logfile"); + logext = XMLHandler.getTagValue(entrynode, "logext"); + logFileLevel = LogLevel.getLogLevelForCode(XMLHandler.getTagValue(entrynode, "loglevel")); + clustering = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "cluster")); + createParentFolder = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "create_parent_folder")); + loggingRemoteWork = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "logging_remote_work")); + + remoteSlaveServerName = XMLHandler.getTagValue(entrynode, "slave_server_name"); + + setAppendLogfile = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "set_append_logfile")); + String wait = XMLHandler.getTagValue(entrynode, "wait_until_finished"); + if (Const.isEmpty(wait)) { + waitingToFinish = true; + } else { + waitingToFinish = "Y".equalsIgnoreCase(wait); + } - for ( int i = 0; i < nrParameters; i++ ) { - Node knode = XMLHandler.getSubNodeByNr( parametersNode, "parameter", i ); + followingAbortRemotely = "Y".equalsIgnoreCase(XMLHandler.getTagValue(entrynode, "follow_abort_remote")); - parameters[ i ] = XMLHandler.getTagValue( knode, "name" ); - parameterFieldNames[ i ] = XMLHandler.getTagValue( knode, "stream_name" ); - parameterValues[ i ] = XMLHandler.getTagValue( knode, "value" ); - } - } catch ( KettleException e ) { - throw new KettleXMLException( "Unable to load job entry of type 'trans' from XML node", e ); - } - } - - // Load the jobentry from repository - // - public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List databases, 
- List slaveServers ) throws KettleException { - try { - String method = rep.getJobEntryAttributeString( id_jobentry, "specification_method" ); - specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode( method ); - String transId = rep.getJobEntryAttributeString( id_jobentry, "trans_object_id" ); - transObjectId = Const.isEmpty( transId ) ? null : new StringObjectId( transId ); - transname = rep.getJobEntryAttributeString( id_jobentry, "name" ); - directory = rep.getJobEntryAttributeString( id_jobentry, "dir_path" ); - filename = rep.getJobEntryAttributeString( id_jobentry, "file_name" ); - - // Backward compatibility check for object specification - // - checkObjectLocationSpecificationMethod(); - - argFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "arg_from_previous" ); - paramsFromPrevious = rep.getJobEntryAttributeBoolean( id_jobentry, "params_from_previous" ); - execPerRow = rep.getJobEntryAttributeBoolean( id_jobentry, "exec_per_row" ); - clearResultRows = rep.getJobEntryAttributeBoolean( id_jobentry, "clear_rows", true ); - clearResultFiles = rep.getJobEntryAttributeBoolean( id_jobentry, "clear_files", true ); - setLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_logfile" ); - addDate = rep.getJobEntryAttributeBoolean( id_jobentry, "add_date" ); - addTime = rep.getJobEntryAttributeBoolean( id_jobentry, "add_time" ); - logfile = rep.getJobEntryAttributeString( id_jobentry, "logfile" ); - logext = rep.getJobEntryAttributeString( id_jobentry, "logext" ); - logFileLevel = LogLevel.getLogLevelForCode( rep.getJobEntryAttributeString( id_jobentry, "loglevel" ) ); - clustering = rep.getJobEntryAttributeBoolean( id_jobentry, "cluster" ); - createParentFolder = rep.getJobEntryAttributeBoolean( id_jobentry, "create_parent_folder" ); - - remoteSlaveServerName = rep.getJobEntryAttributeString( id_jobentry, "slave_server_name" ); - setAppendLogfile = rep.getJobEntryAttributeBoolean( id_jobentry, "set_append_logfile" ); - waitingToFinish = rep.getJobEntryAttributeBoolean( id_jobentry, "wait_until_finished", true ); - followingAbortRemotely = rep.getJobEntryAttributeBoolean( id_jobentry, "follow_abort_remote" ); - loggingRemoteWork = rep.getJobEntryAttributeBoolean( id_jobentry, "logging_remote_work" ); - - // How many arguments? - int argnr = rep.countNrJobEntryAttributes( id_jobentry, "argument" ); - allocateArgs( argnr ); - - // Read all arguments... - for ( int a = 0; a < argnr; a++ ) { - arguments[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "argument" ); - } - - // How many arguments? - int parameternr = rep.countNrJobEntryAttributes( id_jobentry, "parameter_name" ); - allocateParams( parameternr ); - - // Read all parameters ... 
- for ( int a = 0; a < parameternr; a++ ) { - parameters[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_name" ); - parameterFieldNames[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_stream_name" ); - parameterValues[ a ] = rep.getJobEntryAttributeString( id_jobentry, a, "parameter_value" ); - } - - passingAllParameters = rep.getJobEntryAttributeBoolean( id_jobentry, "pass_all_parameters", true ); - - } catch ( KettleDatabaseException dbe ) { - throw new KettleException( "Unable to load job entry of type 'trans' from the repository for id_jobentry=" - + id_jobentry, dbe ); - } - } - - // Save the attributes of this job entry - // - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException { - try { - rep.saveJobEntryAttribute( id_job, getObjectId(), "specification_method", specificationMethod == null - ? null : specificationMethod.getCode() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "trans_object_id", transObjectId == null - ? null : transObjectId.toString() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "name", getTransname() ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "dir_path", getDirectory() != null ? getDirectory() : "" ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "file_name", filename ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "arg_from_previous", argFromPrevious ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "params_from_previous", paramsFromPrevious ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "exec_per_row", execPerRow ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "clear_rows", clearResultRows ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "clear_files", clearResultFiles ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "set_logfile", setLogfile ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "add_date", addDate ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "add_time", addTime ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "logfile", logfile ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "logext", logext ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "loglevel", logFileLevel != null - ? logFileLevel.getCode() : null ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "cluster", clustering ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "slave_server_name", remoteSlaveServerName ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "set_append_logfile", setAppendLogfile ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "wait_until_finished", waitingToFinish ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "create_parent_folder", createParentFolder ); - rep.saveJobEntryAttribute( id_job, getObjectId(), "logging_remote_work", loggingRemoteWork ); - - // Save the arguments... - if ( arguments != null ) { - for ( int i = 0; i < arguments.length; i++ ) { - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "argument", arguments[ i ] ); - } - } - - // Save the parameters... 
- if ( parameters != null ) { - for ( int i = 0; i < parameters.length; i++ ) { - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_name", parameters[ i ] ); - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_stream_name", Const.NVL( - parameterFieldNames[ i ], "" ) ); - rep.saveJobEntryAttribute( id_job, getObjectId(), i, "parameter_value", Const.NVL( - parameterValues[ i ], "" ) ); - } - } + // How many arguments? + int argnr = 0; + while (XMLHandler.getTagValue(entrynode, "argument" + argnr) != null) { + argnr++; + } + allocateArgs(argnr); - rep.saveJobEntryAttribute( id_job, getObjectId(), "pass_all_parameters", passingAllParameters ); + // Read them all... + for (int a = 0; a < argnr; a++) { + arguments[a] = XMLHandler.getTagValue(entrynode, "argument" + a); + } - } catch ( KettleDatabaseException dbe ) { - throw new KettleException( - "Unable to save job entry of type 'trans' to the repository for id_job=" + id_job, dbe ); - } - } - - public void clear() { - super.clear(); - - specificationMethod = ObjectLocationSpecificationMethod.FILENAME; - transname = null; - filename = null; - directory = null; - arguments = null; - argFromPrevious = false; - execPerRow = false; - addDate = false; - addTime = false; - logfile = null; - logext = null; - setLogfile = false; - clearResultRows = false; - clearResultFiles = false; - remoteSlaveServerName = null; - setAppendLogfile = false; - waitingToFinish = true; - followingAbortRemotely = false; // backward compatibility reasons - createParentFolder = false; - logFileLevel = LogLevel.BASIC; - } - - /** - * Execute this job entry and return the result. In this case it means, just set the result boolean in the Result - * class. - * - * @param result The result of the previous execution - * @param nr the job entry number - * @return The Result of the execution. - */ - public Result execute( Result result, int nr ) throws KettleException { - result.setEntryNr( nr ); - - LogChannelFileWriter logChannelFileWriter = null; - - LogLevel transLogLevel = parentJob.getLogLevel(); - - String realLogFilename = ""; - if ( setLogfile ) { - transLogLevel = logFileLevel; - - realLogFilename = environmentSubstitute( getLogFilename() ); - - // We need to check here the log filename - // if we do not have one, we must fail - if ( Const.isEmpty( realLogFilename ) ) { - logError( BaseMessages.getString( PKG, "JobTrans.Exception.LogFilenameMissing" ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } - // create parent folder? - if ( !FileUtil.createParentFolder( PKG, realLogFilename, createParentFolder, this.getLogChannel(), this ) ) { - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } - try { - logChannelFileWriter = - new LogChannelFileWriter( - this.getLogChannelId(), KettleVFS.getFileObject( realLogFilename ), setAppendLogfile ); - logChannelFileWriter.startLogging(); - } catch ( KettleException e ) { - logError( BaseMessages.getString( PKG, "JobTrans.Error.UnableOpenAppender", realLogFilename, e.toString() ) ); - - logError( Const.getStackTracker( e ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } - } + Node parametersNode = XMLHandler.getSubNode(entrynode, "parameters"); - // Figure out the remote slave server... 
- // - SlaveServer remoteSlaveServer = null; - if ( !Const.isEmpty( remoteSlaveServerName ) ) { - String realRemoteSlaveServerName = environmentSubstitute( remoteSlaveServerName ); - remoteSlaveServer = parentJob.getJobMeta().findSlaveServer( realRemoteSlaveServerName ); - if ( remoteSlaveServer == null ) { - throw new KettleException( BaseMessages.getString( - PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName ) ); - } - } + String passAll = XMLHandler.getTagValue(parametersNode, "pass_all_parameters"); + passingAllParameters = Const.isEmpty(passAll) || "Y".equalsIgnoreCase(passAll); - // Open the transformation... - // - switch ( specificationMethod ) { - case FILENAME: - if ( isDetailed() ) { - logDetailed( BaseMessages.getString( - PKG, "JobTrans.Log.OpeningTrans", environmentSubstitute( getFilename() ) ) ); - } - break; - case REPOSITORY_BY_NAME: - if ( isDetailed() ) { - logDetailed( BaseMessages.getString( - PKG, "JobTrans.Log.OpeningTransInDirec", environmentSubstitute( getFilename() ), - environmentSubstitute( directory ) ) ); - } - break; - case REPOSITORY_BY_REFERENCE: - if ( isDetailed() ) { - logDetailed( BaseMessages.getString( PKG, "JobTrans.Log.OpeningTransByReference", transObjectId ) ); + int nrParameters = XMLHandler.countNodes(parametersNode, "parameter"); + allocateParams(nrParameters); + + for (int i = 0; i < nrParameters; i++) { + Node knode = XMLHandler.getSubNodeByNr(parametersNode, "parameter", i); + + parameters[i] = XMLHandler.getTagValue(knode, "name"); + parameterFieldNames[i] = XMLHandler.getTagValue(knode, "stream_name"); + parameterValues[i] = XMLHandler.getTagValue(knode, "value"); + } + } catch (KettleException e) { + throw new KettleXMLException("Unable to load job entry of type 'trans' from XML node", e); } - break; - default: - break; } - // Load the transformation only once for the complete loop! - // Throws an exception if it was not possible to load the transformation. For example, the XML file doesn't exist or - // the repository is down. - // Log the stack trace and return an error condition from this + // Load the jobentry from repository // - TransMeta transMeta = null; - try { - transMeta = getTransMeta( rep, metaStore, this ); - } catch ( KettleException e ) { - logError( Const.getStackTracker( e ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; - } + public void loadRep(Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List databases, + List slaveServers) throws KettleException { + try { + String method = rep.getJobEntryAttributeString(id_jobentry, "specification_method"); + specificationMethod = ObjectLocationSpecificationMethod.getSpecificationMethodByCode(method); + String transId = rep.getJobEntryAttributeString(id_jobentry, "trans_object_id"); + transObjectId = Const.isEmpty(transId) ? 
null : new StringObjectId(transId); + transname = rep.getJobEntryAttributeString(id_jobentry, "name"); + directory = rep.getJobEntryAttributeString(id_jobentry, "dir_path"); + filename = rep.getJobEntryAttributeString(id_jobentry, "file_name"); + + // Backward compatibility check for object specification + // + checkObjectLocationSpecificationMethod(); + + argFromPrevious = rep.getJobEntryAttributeBoolean(id_jobentry, "arg_from_previous"); + paramsFromPrevious = rep.getJobEntryAttributeBoolean(id_jobentry, "params_from_previous"); + execPerRow = rep.getJobEntryAttributeBoolean(id_jobentry, "exec_per_row"); + clearResultRows = rep.getJobEntryAttributeBoolean(id_jobentry, "clear_rows", true); + clearResultFiles = rep.getJobEntryAttributeBoolean(id_jobentry, "clear_files", true); + setLogfile = rep.getJobEntryAttributeBoolean(id_jobentry, "set_logfile"); + addDate = rep.getJobEntryAttributeBoolean(id_jobentry, "add_date"); + addTime = rep.getJobEntryAttributeBoolean(id_jobentry, "add_time"); + logfile = rep.getJobEntryAttributeString(id_jobentry, "logfile"); + logext = rep.getJobEntryAttributeString(id_jobentry, "logext"); + logFileLevel = LogLevel.getLogLevelForCode(rep.getJobEntryAttributeString(id_jobentry, "loglevel")); + clustering = rep.getJobEntryAttributeBoolean(id_jobentry, "cluster"); + createParentFolder = rep.getJobEntryAttributeBoolean(id_jobentry, "create_parent_folder"); + + remoteSlaveServerName = rep.getJobEntryAttributeString(id_jobentry, "slave_server_name"); + setAppendLogfile = rep.getJobEntryAttributeBoolean(id_jobentry, "set_append_logfile"); + waitingToFinish = rep.getJobEntryAttributeBoolean(id_jobentry, "wait_until_finished", true); + followingAbortRemotely = rep.getJobEntryAttributeBoolean(id_jobentry, "follow_abort_remote"); + loggingRemoteWork = rep.getJobEntryAttributeBoolean(id_jobentry, "logging_remote_work"); + + // How many arguments? + int argnr = rep.countNrJobEntryAttributes(id_jobentry, "argument"); + allocateArgs(argnr); + + // Read all arguments... + for (int a = 0; a < argnr; a++) { + arguments[a] = rep.getJobEntryAttributeString(id_jobentry, a, "argument"); + } - int iteration = 0; - String[] args1 = arguments; - if ( args1 == null || args1.length == 0 ) { // No arguments set, look at the parent job. - args1 = parentJob.getArguments(); - } - // initializeVariablesFrom(parentJob); + // How many arguments? + int parameternr = rep.countNrJobEntryAttributes(id_jobentry, "parameter_name"); + allocateParams(parameternr); - // - // For the moment only do variable translation at the start of a job, not - // for every input row (if that would be switched on). This is for safety, - // the real argument setting is later on. - // - String[] args = null; - if ( args1 != null ) { - args = new String[ args1.length ]; - for ( int idx = 0; idx < args1.length; idx++ ) { - args[ idx ] = environmentSubstitute( args1[ idx ] ); - } + // Read all parameters ... 
+ for (int a = 0; a < parameternr; a++) { + parameters[a] = rep.getJobEntryAttributeString(id_jobentry, a, "parameter_name"); + parameterFieldNames[a] = rep.getJobEntryAttributeString(id_jobentry, a, "parameter_stream_name"); + parameterValues[a] = rep.getJobEntryAttributeString(id_jobentry, a, "parameter_value"); + } + + passingAllParameters = rep.getJobEntryAttributeBoolean(id_jobentry, "pass_all_parameters", true); + + } catch (KettleDatabaseException dbe) { + throw new KettleException("Unable to load job entry of type 'trans' from the repository for id_jobentry=" + + id_jobentry, dbe); + } } - RowMetaAndData resultRow = null; - boolean first = true; - List rows = new ArrayList( result.getRows() ); - - while ( ( first && !execPerRow ) - || ( execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0 ) - && !parentJob.isStopped() ) { - // Clear the result rows of the result - // Otherwise we double the amount of rows every iteration in the simple cases. - // - if ( execPerRow ) { - result.getRows().clear(); - } - if ( rows != null && execPerRow ) { - resultRow = rows.get( iteration ); - } else { - resultRow = null; - } - - NamedParams namedParam = new NamedParamsDefault(); - if ( parameters != null ) { - for ( int idx = 0; idx < parameters.length; idx++ ) { - if ( !Const.isEmpty( parameters[ idx ] ) ) { - // We have a parameter - // - namedParam.addParameterDefinition( parameters[ idx ], "", "Job entry runtime" ); - if ( Const.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { - // There is no field name specified. - // - String value = Const.NVL( environmentSubstitute( parameterValues[ idx ] ), "" ); - namedParam.setParameterValue( parameters[ idx ], value ); - } else { - // something filled in, in the field column... - // - String value = ""; - if ( resultRow != null ) { - value = resultRow.getString( parameterFieldNames[ idx ], "" ); - } - namedParam.setParameterValue( parameters[ idx ], value ); + // Save the attributes of this job entry + // + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_job) throws KettleException { + try { + rep.saveJobEntryAttribute(id_job, getObjectId(), "specification_method", specificationMethod == null + ? null : specificationMethod.getCode()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "trans_object_id", transObjectId == null + ? null : transObjectId.toString()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "name", getTransname()); + rep.saveJobEntryAttribute(id_job, getObjectId(), "dir_path", getDirectory() != null ? getDirectory() : ""); + rep.saveJobEntryAttribute(id_job, getObjectId(), "file_name", filename); + rep.saveJobEntryAttribute(id_job, getObjectId(), "arg_from_previous", argFromPrevious); + rep.saveJobEntryAttribute(id_job, getObjectId(), "params_from_previous", paramsFromPrevious); + rep.saveJobEntryAttribute(id_job, getObjectId(), "exec_per_row", execPerRow); + rep.saveJobEntryAttribute(id_job, getObjectId(), "clear_rows", clearResultRows); + rep.saveJobEntryAttribute(id_job, getObjectId(), "clear_files", clearResultFiles); + rep.saveJobEntryAttribute(id_job, getObjectId(), "set_logfile", setLogfile); + rep.saveJobEntryAttribute(id_job, getObjectId(), "add_date", addDate); + rep.saveJobEntryAttribute(id_job, getObjectId(), "add_time", addTime); + rep.saveJobEntryAttribute(id_job, getObjectId(), "logfile", logfile); + rep.saveJobEntryAttribute(id_job, getObjectId(), "logext", logext); + rep.saveJobEntryAttribute(id_job, getObjectId(), "loglevel", logFileLevel != null + ? 
logFileLevel.getCode() : null); + rep.saveJobEntryAttribute(id_job, getObjectId(), "cluster", clustering); + rep.saveJobEntryAttribute(id_job, getObjectId(), "slave_server_name", remoteSlaveServerName); + rep.saveJobEntryAttribute(id_job, getObjectId(), "set_append_logfile", setAppendLogfile); + rep.saveJobEntryAttribute(id_job, getObjectId(), "wait_until_finished", waitingToFinish); + rep.saveJobEntryAttribute(id_job, getObjectId(), "follow_abort_remote", followingAbortRemotely); + rep.saveJobEntryAttribute(id_job, getObjectId(), "create_parent_folder", createParentFolder); + rep.saveJobEntryAttribute(id_job, getObjectId(), "logging_remote_work", loggingRemoteWork); + + // Save the arguments... + if (arguments != null) { + for (int i = 0; i < arguments.length; i++) { + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "argument", arguments[i]); + } } - } - } - } - first = false; + // Save the parameters... + if (parameters != null) { + for (int i = 0; i < parameters.length; i++) { + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "parameter_name", parameters[i]); + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "parameter_stream_name", Const.NVL( + parameterFieldNames[i], "")); + rep.saveJobEntryAttribute(id_job, getObjectId(), i, "parameter_value", Const.NVL( + parameterValues[i], "")); + } + } - Result previousResult = result; + rep.saveJobEntryAttribute(id_job, getObjectId(), "pass_all_parameters", passingAllParameters); - try { - if ( isDetailed() ) { - logDetailed( BaseMessages.getString( - PKG, "JobTrans.StartingTrans", getFilename(), getName(), getDescription() ) ); + } catch (KettleDatabaseException dbe) { + throw new KettleException( + "Unable to save job entry of type 'trans' to the repository for id_job=" + id_job, dbe); } + } - if ( clearResultRows ) { - previousResult.setRows( new ArrayList() ); - } + public void clear() { + super.clear(); - if ( clearResultFiles ) { - previousResult.getResultFiles().clear(); - } + specificationMethod = ObjectLocationSpecificationMethod.FILENAME; + transname = null; + filename = null; + directory = null; + arguments = null; + argFromPrevious = false; + execPerRow = false; + addDate = false; + addTime = false; + logfile = null; + logext = null; + setLogfile = false; + clearResultRows = false; + clearResultFiles = false; + remoteSlaveServerName = null; + setAppendLogfile = false; + waitingToFinish = true; + followingAbortRemotely = false; // backward compatibility reasons + createParentFolder = false; + logFileLevel = LogLevel.BASIC; + } - /* - * Set one or more "result" rows on the transformation... - */ - if ( execPerRow ) { - // Execute for each input row - - if ( argFromPrevious ) { - // Copy the input row to the (command line) arguments - - args = null; - if ( resultRow != null ) { - args = new String[ resultRow.size() ]; - for ( int i = 0; i < resultRow.size(); i++ ) { - args[ i ] = resultRow.getString( i, null ); - } + /** + * Execute this job entry and return the result. In this case it means, just set the result boolean in the Result + * class. + * + * @param result The result of the previous execution + * @param nr the job entry number + * @return The Result of the execution. 
+ */ + public Result execute(Result result, int nr) throws KettleException { + result.setEntryNr(nr); + + LogChannelFileWriter logChannelFileWriter = null; + + LogLevel transLogLevel = parentJob.getLogLevel(); + + String realLogFilename = ""; + if (setLogfile) { + transLogLevel = logFileLevel; + + realLogFilename = environmentSubstitute(getLogFilename()); + + // We need to check here the log filename + // if we do not have one, we must fail + if (Const.isEmpty(realLogFilename)) { + logError(BaseMessages.getString(PKG, "JobTrans.Exception.LogFilenameMissing")); + result.setNrErrors(1); + result.setResult(false); + return result; } - } else { - // Just pass a single row - List newList = new ArrayList(); - newList.add( resultRow ); - - // This previous result rows list can be either empty or not. - // Depending on the checkbox "clear result rows" - // In this case, it would execute the transformation with one extra row each time - // Can't figure out a real use-case for it, but hey, who am I to decide that, right? - // :-) - // - previousResult.getRows().addAll( newList ); - } - - if ( paramsFromPrevious ) { // Copy the input the parameters - - if ( parameters != null ) { - for ( int idx = 0; idx < parameters.length; idx++ ) { - if ( !Const.isEmpty( parameters[ idx ] ) ) { - // We have a parameter - if ( Const.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { - namedParam.setParameterValue( parameters[ idx ], Const.NVL( - environmentSubstitute( parameterValues[ idx ] ), "" ) ); - } else { - String fieldValue = ""; - - if ( resultRow != null ) { - fieldValue = resultRow.getString( parameterFieldNames[ idx ], "" ); - } - // Get the value from the input stream - namedParam.setParameterValue( parameters[ idx ], Const.NVL( fieldValue, "" ) ); - } - } - } - } - } - } else { - if ( argFromPrevious ) { - // Only put the first Row on the arguments - args = null; - if ( resultRow != null ) { - args = new String[ resultRow.size() ]; - for ( int i = 0; i < resultRow.size(); i++ ) { - args[ i ] = resultRow.getString( i, null ); - } + // create parent folder? + if (!FileUtil.createParentFolder(PKG, realLogFilename, createParentFolder, this.getLogChannel(), this)) { + result.setNrErrors(1); + result.setResult(false); + return result; } - } - - if ( paramsFromPrevious ) { - // Copy the input the parameters - if ( parameters != null ) { - for ( int idx = 0; idx < parameters.length; idx++ ) { - if ( !Const.isEmpty( parameters[ idx ] ) ) { - // We have a parameter - if ( Const.isEmpty( Const.trim( parameterFieldNames[ idx ] ) ) ) { - namedParam.setParameterValue( parameters[ idx ], Const.NVL( - environmentSubstitute( parameterValues[ idx ] ), "" ) ); - } else { - String fieldValue = ""; - - if ( resultRow != null ) { - fieldValue = resultRow.getString( parameterFieldNames[ idx ], "" ); - } - // Get the value from the input stream - namedParam.setParameterValue( parameters[ idx ], Const.NVL( fieldValue, "" ) ); - } - } - } + try { + logChannelFileWriter = + new LogChannelFileWriter( + this.getLogChannelId(), KettleVFS.getFileObject(realLogFilename), setAppendLogfile); + logChannelFileWriter.startLogging(); + } catch (KettleException e) { + logError(BaseMessages.getString(PKG, "JobTrans.Error.UnableOpenAppender", realLogFilename, e.toString())); + + logError(Const.getStackTracker(e)); + result.setNrErrors(1); + result.setResult(false); + return result; } - } } - // Handle the parameters... + // Figure out the remote slave server... 
// - transMeta.clearParameters(); - String[] parameterNames = transMeta.listParameters(); - for ( int idx = 0; idx < parameterNames.length; idx++ ) { - // Grab the parameter value set in the Trans job entry - // - String thisValue = namedParam.getParameterValue( parameterNames[ idx ] ); - if ( !Const.isEmpty( thisValue ) ) { - // Set the value as specified by the user in the job entry - // - transMeta.setParameterValue( parameterNames[ idx ], thisValue ); - } else { - // See if the parameter had a value set in the parent job... - // This value should pass down to the transformation if that's what we opted to do. - // - if ( isPassingAllParameters() ) { - String parentValue = parentJob.getParameterValue( parameterNames[ idx ] ); - if ( !Const.isEmpty( parentValue ) ) { - transMeta.setParameterValue( parameterNames[ idx ], parentValue ); - } + SlaveServer remoteSlaveServer = null; + if (!Const.isEmpty(remoteSlaveServerName)) { + String realRemoteSlaveServerName = environmentSubstitute(remoteSlaveServerName); + remoteSlaveServer = parentJob.getJobMeta().findSlaveServer(realRemoteSlaveServerName); + if (remoteSlaveServer == null) { + throw new KettleException(BaseMessages.getString( + PKG, "JobTrans.Exception.UnableToFindRemoteSlaveServer", realRemoteSlaveServerName)); } - } } - // Execute this transformation across a cluster of servers + // Open the transformation... // - if ( clustering ) { - TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration(); - executionConfiguration.setClusterPosting( true ); - executionConfiguration.setClusterPreparing( true ); - executionConfiguration.setClusterStarting( true ); - executionConfiguration.setClusterShowingTransformation( false ); - executionConfiguration.setSafeModeEnabled( false ); - executionConfiguration.setRepository( rep ); - executionConfiguration.setLogLevel( transLogLevel ); - executionConfiguration.setPreviousResult( previousResult ); - - // Also pass the variables from the transformation into the execution configuration - // That way it can go over the HTTP connection to the slave server. - // - executionConfiguration.setVariables( transMeta ); - - // Also set the arguments... - // - executionConfiguration.setArgumentStrings( args ); - - if ( parentJob.getJobMeta().isBatchIdPassed() ) { - executionConfiguration.setPassedBatchId( parentJob.getPassedBatchId() ); - } - - TransSplitter transSplitter = null; - long errors = 0; - try { - transSplitter = Trans.executeClustered( transMeta, executionConfiguration ); - - // Monitor the running transformations, wait until they are done. - // Also kill them all if anything goes bad - // Also clean up afterwards... - // - errors += Trans.monitorClusteredTransformation( log, transSplitter, parentJob ); + switch (specificationMethod) { + case FILENAME: + if (isDetailed()) { + logDetailed(BaseMessages.getString( + PKG, "JobTrans.Log.OpeningTrans", environmentSubstitute(getFilename()))); + } + break; + case REPOSITORY_BY_NAME: + if (isDetailed()) { + logDetailed(BaseMessages.getString( + PKG, "JobTrans.Log.OpeningTransInDirec", environmentSubstitute(getFilename()), + environmentSubstitute(directory))); + } + break; + case REPOSITORY_BY_REFERENCE: + if (isDetailed()) { + logDetailed(BaseMessages.getString(PKG, "JobTrans.Log.OpeningTransByReference", transObjectId)); + } + break; + default: + break; + } + + // Load the transformation only once for the complete loop! + // Throws an exception if it was not possible to load the transformation. 
For example, the XML file doesn't exist or + // the repository is down. + // Log the stack trace and return an error condition from this + // + TransMeta transMeta = null; + try { + transMeta = getTransMeta(rep, metaStore, this); + } catch (KettleException e) { + logError(Const.getStackTracker(e)); + result.setNrErrors(1); + result.setResult(false); + return result; + } + + int iteration = 0; + String[] args1 = arguments; + if (args1 == null || args1.length == 0) { // No arguments set, look at the parent job. + args1 = parentJob.getArguments(); + } + // initializeVariablesFrom(parentJob); - } catch ( Exception e ) { - logError( "Error during clustered execution. Cleaning up clustered execution.", e ); - // In case something goes wrong, make sure to clean up afterwards! + // + // For the moment only do variable translation at the start of a job, not + // for every input row (if that would be switched on). This is for safety, + // the real argument setting is later on. + // + String[] args = null; + if (args1 != null) { + args = new String[args1.length]; + for (int idx = 0; idx < args1.length; idx++) { + args[idx] = environmentSubstitute(args1[idx]); + } + } + + RowMetaAndData resultRow = null; + boolean first = true; + List rows = new ArrayList(result.getRows()); + + while ((first && !execPerRow) + || (execPerRow && rows != null && iteration < rows.size() && result.getNrErrors() == 0) + && !parentJob.isStopped()) { + // Clear the result rows of the result + // Otherwise we double the amount of rows every iteration in the simple cases. // - errors++; - if ( transSplitter != null ) { - Trans.cleanupCluster( log, transSplitter ); + if (execPerRow) { + result.getRows().clear(); + } + if (rows != null && execPerRow) { + resultRow = rows.get(iteration); } else { - // Try to clean anyway... - // - SlaveServer master = null; - for ( StepMeta stepMeta : transMeta.getSteps() ) { - if ( stepMeta.isClustered() ) { - for ( SlaveServer slaveServer : stepMeta.getClusterSchema().getSlaveServers() ) { - if ( slaveServer.isMaster() ) { - master = slaveServer; - break; + resultRow = null; + } + + NamedParams namedParam = new NamedParamsDefault(); + if (parameters != null) { + for (int idx = 0; idx < parameters.length; idx++) { + if (!Const.isEmpty(parameters[idx])) { + // We have a parameter + // + namedParam.addParameterDefinition(parameters[idx], "", "Job entry runtime"); + if (Const.isEmpty(Const.trim(parameterFieldNames[idx]))) { + // There is no field name specified. + // + String value = Const.NVL(environmentSubstitute(parameterValues[idx]), ""); + namedParam.setParameterValue(parameters[idx], value); + } else { + // something filled in, in the field column... + // + String value = ""; + if (resultRow != null) { + value = resultRow.getString(parameterFieldNames[idx], ""); + } + namedParam.setParameterValue(parameters[idx], value); + } } - } } - } - if ( master != null ) { - master.deAllocateServerSockets( transMeta.getName(), null ); - } } - } - - result.clear(); - - if ( transSplitter != null ) { - Result clusterResult = - Trans.getClusteredTransformationResult( log, transSplitter, parentJob, loggingRemoteWork ); - result.add( clusterResult ); - } - - result.setNrErrors( result.getNrErrors() + errors ); - - } else if ( remoteSlaveServer != null ) { - // Execute this transformation remotely - // - - // Make sure we can parameterize the slave server connection - // - remoteSlaveServer.shareVariablesWith( this ); - - // Remote execution... 
- // - TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration(); - transExecutionConfiguration.setPreviousResult( previousResult.clone() ); - transExecutionConfiguration.setArgumentStrings( args ); - transExecutionConfiguration.setVariables( this ); - transExecutionConfiguration.setRemoteServer( remoteSlaveServer ); - transExecutionConfiguration.setLogLevel( transLogLevel ); - transExecutionConfiguration.setRepository( rep ); - transExecutionConfiguration.setLogFileName( realLogFilename ); - transExecutionConfiguration.setSetAppendLogfile( setAppendLogfile ); - transExecutionConfiguration.setSetLogfile( setLogfile ); - - Map params = transExecutionConfiguration.getParams(); - for ( String param : transMeta.listParameters() ) { - String value = - Const.NVL( transMeta.getParameterValue( param ), Const.NVL( - transMeta.getParameterDefault( param ), transMeta.getVariable( param ) ) ); - params.put( param, value ); - } - - if ( parentJob.getJobMeta().isBatchIdPassed() ) { - transExecutionConfiguration.setPassedBatchId( parentJob.getPassedBatchId() ); - } - - // Send the XML over to the slave server - // Also start the transformation over there... - // - String carteObjectId = Trans.sendToSlaveServer( transMeta, transExecutionConfiguration, rep, metaStore ); - - // Now start the monitoring... - // - SlaveServerTransStatus transStatus = null; - while ( !parentJob.isStopped() && waitingToFinish ) { + + first = false; + + Result previousResult = result; + try { - transStatus = remoteSlaveServer.getTransStatus( transMeta.getName(), carteObjectId, 0 ); - if ( !transStatus.isRunning() ) { - // The transformation is finished, get the result... - // - Result remoteResult = transStatus.getResult(); - result.clear(); - result.add( remoteResult ); + if (isDetailed()) { + logDetailed(BaseMessages.getString( + PKG, "JobTrans.StartingTrans", getFilename(), getName(), getDescription())); + } + + if (clearResultRows) { + previousResult.setRows(new ArrayList()); + } - // In case you manually stop the remote trans (browser etc), make sure it's marked as an error + if (clearResultFiles) { + previousResult.getResultFiles().clear(); + } + + /* + * Set one or more "result" rows on the transformation... + */ + if (execPerRow) { + // Execute for each input row + + if (argFromPrevious) { + // Copy the input row to the (command line) arguments + + args = null; + if (resultRow != null) { + args = new String[resultRow.size()]; + for (int i = 0; i < resultRow.size(); i++) { + args[i] = resultRow.getString(i, null); + } + } + } else { + // Just pass a single row + List newList = new ArrayList(); + newList.add(resultRow); + + // This previous result rows list can be either empty or not. + // Depending on the checkbox "clear result rows" + // In this case, it would execute the transformation with one extra row each time + // Can't figure out a real use-case for it, but hey, who am I to decide that, right? 
+ // :-) + // + previousResult.getRows().addAll(newList); + } + + if (paramsFromPrevious) { // Copy the input the parameters + + if (parameters != null) { + for (int idx = 0; idx < parameters.length; idx++) { + if (!Const.isEmpty(parameters[idx])) { + // We have a parameter + if (Const.isEmpty(Const.trim(parameterFieldNames[idx]))) { + namedParam.setParameterValue(parameters[idx], Const.NVL( + environmentSubstitute(parameterValues[idx]), "")); + } else { + String fieldValue = ""; + + if (resultRow != null) { + fieldValue = resultRow.getString(parameterFieldNames[idx], ""); + } + // Get the value from the input stream + namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, "")); + } + } + } + } + } + } else { + if (argFromPrevious) { + // Only put the first Row on the arguments + args = null; + if (resultRow != null) { + args = new String[resultRow.size()]; + for (int i = 0; i < resultRow.size(); i++) { + args[i] = resultRow.getString(i, null); + } + } + } + + if (paramsFromPrevious) { + // Copy the input the parameters + if (parameters != null) { + for (int idx = 0; idx < parameters.length; idx++) { + if (!Const.isEmpty(parameters[idx])) { + // We have a parameter + if (Const.isEmpty(Const.trim(parameterFieldNames[idx]))) { + namedParam.setParameterValue(parameters[idx], Const.NVL( + environmentSubstitute(parameterValues[idx]), "")); + } else { + String fieldValue = ""; + + if (resultRow != null) { + fieldValue = resultRow.getString(parameterFieldNames[idx], ""); + } + // Get the value from the input stream + namedParam.setParameterValue(parameters[idx], Const.NVL(fieldValue, "")); + } + } + } + } + } + } + + // Handle the parameters... // - if ( remoteResult.isStopped() ) { - result.setNrErrors( result.getNrErrors() + 1 ); // + transMeta.clearParameters(); + String[] parameterNames = transMeta.listParameters(); + for (int idx = 0; idx < parameterNames.length; idx++) { + // Grab the parameter value set in the Trans job entry + // + String thisValue = namedParam.getParameterValue(parameterNames[idx]); + if (!Const.isEmpty(thisValue)) { + // Set the value as specified by the user in the job entry + // + transMeta.setParameterValue(parameterNames[idx], thisValue); + } else { + // See if the parameter had a value set in the parent job... + // This value should pass down to the transformation if that's what we opted to do. + // + if (isPassingAllParameters()) { + String parentValue = parentJob.getParameterValue(parameterNames[idx]); + if (!Const.isEmpty(parentValue)) { + transMeta.setParameterValue(parameterNames[idx], parentValue); + } + } + } } - // Make sure to clean up : write a log record etc, close any left-over sockets etc. + // Execute this transformation across a cluster of servers // - remoteSlaveServer.cleanupTransformation( transMeta.getName(), carteObjectId ); + if (clustering) { + TransExecutionConfiguration executionConfiguration = new TransExecutionConfiguration(); + executionConfiguration.setClusterPosting(true); + executionConfiguration.setClusterPreparing(true); + executionConfiguration.setClusterStarting(true); + executionConfiguration.setClusterShowingTransformation(false); + executionConfiguration.setSafeModeEnabled(false); + executionConfiguration.setRepository(rep); + executionConfiguration.setLogLevel(transLogLevel); + executionConfiguration.setPreviousResult(previousResult); + + // Also pass the variables from the transformation into the execution configuration + // That way it can go over the HTTP connection to the slave server. 
+ // + executionConfiguration.setVariables(transMeta); + + // Also set the arguments... + // + executionConfiguration.setArgumentStrings(args); + + if (parentJob.getJobMeta().isBatchIdPassed()) { + executionConfiguration.setPassedBatchId(parentJob.getPassedBatchId()); + } - break; - } - } catch ( Exception e1 ) { + TransSplitter transSplitter = null; + long errors = 0; + try { + transSplitter = Trans.executeClustered(transMeta, executionConfiguration); + + // Monitor the running transformations, wait until they are done. + // Also kill them all if anything goes bad + // Also clean up afterwards... + // + errors += Trans.monitorClusteredTransformation(log, transSplitter, parentJob); + + } catch (Exception e) { + logError("Error during clustered execution. Cleaning up clustered execution.", e); + // In case something goes wrong, make sure to clean up afterwards! + // + errors++; + if (transSplitter != null) { + Trans.cleanupCluster(log, transSplitter); + } else { + // Try to clean anyway... + // + SlaveServer master = null; + for (StepMeta stepMeta : transMeta.getSteps()) { + if (stepMeta.isClustered()) { + for (SlaveServer slaveServer : stepMeta.getClusterSchema().getSlaveServers()) { + if (slaveServer.isMaster()) { + master = slaveServer; + break; + } + } + } + } + if (master != null) { + master.deAllocateServerSockets(transMeta.getName(), null); + } + } + } - logError( BaseMessages.getString( PKG, "JobTrans.Error.UnableContactSlaveServer", "" - + remoteSlaveServer, transMeta.getName() ), e1 ); - result.setNrErrors( result.getNrErrors() + 1L ); - break; // Stop looking too, chances are too low the server will come back on-line - } + result.clear(); - // sleep for 2 seconds - try { - Thread.sleep( 2000 ); - } catch ( InterruptedException e ) { - // Ignore - } - } + if (transSplitter != null) { + Result clusterResult = + Trans.getClusteredTransformationResult(log, transSplitter, parentJob, loggingRemoteWork); + result.add(clusterResult); + } - if ( parentJob.isStopped() ) { - // See if we have a status and if we need to stop the remote execution here... - // - if ( transStatus == null || transStatus.isRunning() ) { - // Try a remote abort ... - // - remoteSlaveServer.stopTransformation( transMeta.getName(), transStatus.getId() ); - - // And a cleanup... - // - remoteSlaveServer.cleanupTransformation( transMeta.getName(), transStatus.getId() ); - - // Set an error state! - // - result.setNrErrors( result.getNrErrors() + 1L ); - } - } + result.setNrErrors(result.getNrErrors() + errors); + + } else if (remoteSlaveServer != null) { + // Execute this transformation remotely + // + + // Make sure we can parameterize the slave server connection + // + remoteSlaveServer.shareVariablesWith(this); + + // Remote execution... 
+ // + TransExecutionConfiguration transExecutionConfiguration = new TransExecutionConfiguration(); + transExecutionConfiguration.setPreviousResult(previousResult.clone()); + transExecutionConfiguration.setArgumentStrings(args); + transExecutionConfiguration.setVariables(this); + transExecutionConfiguration.setRemoteServer(remoteSlaveServer); + transExecutionConfiguration.setLogLevel(transLogLevel); + transExecutionConfiguration.setRepository(rep); + transExecutionConfiguration.setLogFileName(realLogFilename); + transExecutionConfiguration.setSetAppendLogfile(setAppendLogfile); + transExecutionConfiguration.setSetLogfile(setLogfile); + + Map params = transExecutionConfiguration.getParams(); + for (String param : transMeta.listParameters()) { + String value = + Const.NVL(transMeta.getParameterValue(param), Const.NVL( + transMeta.getParameterDefault(param), transMeta.getVariable(param))); + params.put(param, value); + } - } else { + if (parentJob.getJobMeta().isBatchIdPassed()) { + transExecutionConfiguration.setPassedBatchId(parentJob.getPassedBatchId()); + } - // Execute this transformation on the local machine - // - - // Create the transformation from meta-data - // - trans = new Trans( transMeta, this ); - - // Pass the socket repository as early as possible... - // - trans.setSocketRepository( parentJob.getSocketRepository() ); - - if ( parentJob.getJobMeta().isBatchIdPassed() ) { - trans.setPassedBatchId( parentJob.getPassedBatchId() ); - } - - // set the parent job on the transformation, variables are taken from here... - // - trans.setParentJob( parentJob ); - trans.setParentVariableSpace( parentJob ); - trans.setLogLevel( transLogLevel ); - trans.setPreviousResult( previousResult ); - trans.setArguments( arguments ); - - // Mappings need the repository to load from - // - trans.setRepository( rep ); - - // inject the metaStore - trans.setMetaStore( metaStore ); - - // First get the root job - // - Job rootJob = parentJob; - while ( rootJob.getParentJob() != null ) { - rootJob = rootJob.getParentJob(); - } - - // Get the start and end-date from the root job... - // - trans.setJobStartDate( rootJob.getStartDate() ); - trans.setJobEndDate( rootJob.getEndDate() ); - - // Inform the parent job we started something here... - // - for ( DelegationListener delegationListener : parentJob.getDelegationListeners() ) { - // TODO: copy some settings in the job execution configuration, not strictly needed - // but the execution configuration information is useful in case of a job re-start - // - delegationListener.transformationDelegationStarted( trans, new TransExecutionConfiguration() ); - } + // Send the XML over to the slave server + // Also start the transformation over there... + // + String carteObjectId = Trans.sendToSlaveServer(transMeta, transExecutionConfiguration, rep, metaStore); + + // Now start the monitoring... + // + SlaveServerTransStatus transStatus = null; + while (!parentJob.isStopped() && waitingToFinish) { + try { + transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0); + if (!transStatus.isRunning()) { + // The transformation is finished, get the result... + // + Result remoteResult = transStatus.getResult(); + result.clear(); + result.add(remoteResult); + + // In case you manually stop the remote trans (browser etc), make sure it's marked as an error + // + if (remoteResult.isStopped()) { + result.setNrErrors(result.getNrErrors() + 1); // + } + + // Make sure to clean up : write a log record etc, close any left-over sockets etc. 
+ // + remoteSlaveServer.cleanupTransformation(transMeta.getName(), carteObjectId); + + break; + } + } catch (Exception e1) { + + logError(BaseMessages.getString(PKG, "JobTrans.Error.UnableContactSlaveServer", "" + + remoteSlaveServer, transMeta.getName()), e1); + result.setNrErrors(result.getNrErrors() + 1L); + break; // Stop looking too, chances are too low the server will come back on-line + } + + // sleep for 2 seconds + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + // Ignore + } + } - try { - // Start execution... - // - trans.execute( args ); + if (parentJob.isStopped()) { + // See if we have a status and if we need to stop the remote execution here... + // + if (transStatus == null || transStatus.isRunning()) { + // Try a remote abort ... + // + remoteSlaveServer.stopTransformation(transMeta.getName(), transStatus.getId()); + + // And a cleanup... + // + remoteSlaveServer.cleanupTransformation(transMeta.getName(), transStatus.getId()); + + // Set an error state! + // + result.setNrErrors(result.getNrErrors() + 1L); + } + } - // Wait until we're done with it... - //TODO is it possible to implement Observer pattern to avoid Thread.sleep here? - while ( !trans.isFinished() && trans.getErrors() == 0 ) { - if ( parentJob.isStopped() ) { - trans.stopAll(); - break; - } else { - try { - Thread.sleep( 0, 500 ); - } catch ( InterruptedException e ) { - // Ignore errors + } else { + + // Execute this transformation on the local machine + // + + // Create the transformation from meta-data + // + trans = new Trans(transMeta, this); + + // Pass the socket repository as early as possible... + // + trans.setSocketRepository(parentJob.getSocketRepository()); + + if (parentJob.getJobMeta().isBatchIdPassed()) { + trans.setPassedBatchId(parentJob.getPassedBatchId()); + } + + // set the parent job on the transformation, variables are taken from here... + // + trans.setParentJob(parentJob); + trans.setParentVariableSpace(parentJob); + trans.setLogLevel(transLogLevel); + trans.setPreviousResult(previousResult); + trans.setArguments(arguments); + + // Mappings need the repository to load from + // + trans.setRepository(rep); + + // inject the metaStore + trans.setMetaStore(metaStore); + + // First get the root job + // + Job rootJob = parentJob; + while (rootJob.getParentJob() != null) { + rootJob = rootJob.getParentJob(); + } + + // Get the start and end-date from the root job... + // + trans.setJobStartDate(rootJob.getStartDate()); + trans.setJobEndDate(rootJob.getEndDate()); + + // Inform the parent job we started something here... + // + for (DelegationListener delegationListener : parentJob.getDelegationListeners()) { + // TODO: copy some settings in the job execution configuration, not strictly needed + // but the execution configuration information is useful in case of a job re-start + // + delegationListener.transformationDelegationStarted(trans, new TransExecutionConfiguration()); + } + + try { + // Start execution... + // + trans.execute(args); + + // Wait until we're done with it... + //TODO is it possible to implement Observer pattern to avoid Thread.sleep here? 
+ while (!trans.isFinished() && trans.getErrors() == 0) { + if (parentJob.isStopped()) { + trans.stopAll(); + break; + } else { + try { + Thread.sleep(0, 500); + } catch (InterruptedException e) { + // Ignore errors + } + } + } + trans.waitUntilFinished(); + + if (parentJob.isStopped() || trans.getErrors() != 0) { + trans.stopAll(); + result.setNrErrors(1); + } + Result newResult = trans.getResult(); + + result.clear(); // clear only the numbers, NOT the files or rows. + result.add(newResult); + + // Set the result rows too, if any ... + if (!Const.isEmpty(newResult.getRows())) { + result.setRows(newResult.getRows()); + } + + if (setLogfile) { + ResultFile resultFile = + new ResultFile( + ResultFile.FILE_TYPE_LOG, KettleVFS.getFileObject(realLogFilename, this), parentJob + .getJobname(), toString() + ); + result.getResultFiles().put(resultFile.getFile().toString(), resultFile); + } + } catch (KettleException e) { + + logError(BaseMessages.getString(PKG, "JobTrans.Error.UnablePrepareExec"), e); + result.setNrErrors(1); + } } - } + } catch (Exception e) { + + logError(BaseMessages.getString(PKG, "JobTrans.ErrorUnableOpenTrans", e.getMessage())); + logError(Const.getStackTracker(e)); + result.setNrErrors(1); } - trans.waitUntilFinished(); + iteration++; + } + + if (setLogfile) { + if (logChannelFileWriter != null) { + logChannelFileWriter.stopLogging(); - if ( parentJob.isStopped() || trans.getErrors() != 0 ) { - trans.stopAll(); - result.setNrErrors( 1 ); + ResultFile resultFile = + new ResultFile( + ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName()); + result.getResultFiles().put(resultFile.getFile().toString(), resultFile); + + // See if anything went wrong during file writing... + // + if (logChannelFileWriter.getException() != null) { + logError("Unable to open log file [" + getLogFilename() + "] : "); + logError(Const.getStackTracker(logChannelFileWriter.getException())); + result.setNrErrors(1); + result.setResult(false); + return result; + } } - Result newResult = trans.getResult(); + } + + if (result.getNrErrors() == 0) { + result.setResult(true); + } else { + result.setResult(false); + } + + return result; + } + + @Deprecated + public TransMeta getTransMeta(Repository rep, VariableSpace space) throws KettleException { + return getTransMeta(rep, null, space); + } - result.clear(); // clear only the numbers, NOT the files or rows. 
- result.add( newResult ); + public TransMeta getTransMeta(Repository rep, IMetaStore metaStore, VariableSpace space) throws KettleException { + try { + TransMeta transMeta = null; + CurrentDirectoryResolver r = new CurrentDirectoryResolver(); + VariableSpace tmpSpace = r.resolveCurrentDirectory( + specificationMethod, space, rep, parentJob, getFilename()); + switch (specificationMethod) { + case FILENAME: + String realFilename = tmpSpace.environmentSubstitute(getFilename()); + if (rep != null) { + realFilename = r.normalizeSlashes(realFilename); + // need to try to load from the repository + try { + String dirStr = realFilename.substring(0, realFilename.lastIndexOf("/")); + String tmpFilename = realFilename.substring(realFilename.lastIndexOf("/") + 1); + RepositoryDirectoryInterface dir = rep.findDirectory(dirStr); + transMeta = rep.loadTransformation(tmpFilename, dir, null, true, null); + } catch (KettleException ke) { + // try without extension + if (realFilename.endsWith(Const.STRING_TRANS_DEFAULT_EXT)) { + try { + String tmpFilename = realFilename.substring(realFilename.lastIndexOf("/") + 1, + realFilename.indexOf("." + Const.STRING_TRANS_DEFAULT_EXT)); + String dirStr = realFilename.substring(0, realFilename.lastIndexOf("/")); + RepositoryDirectoryInterface dir = rep.findDirectory(dirStr); + transMeta = rep.loadTransformation(tmpFilename, dir, null, true, null); + } catch (KettleException ke2) { + // fall back to try loading from file system (transMeta is going to be null) + } + } + } + } + if (transMeta == null) { + logBasic("Loading transformation from XML file [" + realFilename + "]"); + transMeta = new TransMeta(realFilename, metaStore, null, true, this, null); + } + break; + case REPOSITORY_BY_NAME: + String transname = tmpSpace.environmentSubstitute(getTransname()); + String realDirectory = tmpSpace.environmentSubstitute(getDirectory()); + + logBasic(BaseMessages.getString(PKG, "JobTrans.Log.LoadingTransRepDirec", transname, realDirectory)); + + if (rep != null) { + // + // It only makes sense to try to load from the repository when the + // repository is also filled in. + // + // It reads last the last revision from the repository. + // + realDirectory = r.normalizeSlashes(realDirectory); + + RepositoryDirectoryInterface repositoryDirectory = rep.findDirectory(realDirectory); + transMeta = rep.loadTransformation(transname, repositoryDirectory, null, true, null); + } else { + // rep is null, let's try loading by filename + try { + transMeta = new TransMeta(realDirectory + "/" + transname, metaStore, null, true, this, null); + } catch (KettleException ke) { + try { + // add .ktr extension and try again + transMeta = new TransMeta(realDirectory + "/" + transname + "." + Const.STRING_TRANS_DEFAULT_EXT, + metaStore, null, true, this, null); + } catch (KettleException ke2) { + throw new KettleException(BaseMessages.getString(PKG, "JobTrans.Exception.NoRepDefined"), ke2); + } + } + } + break; + case REPOSITORY_BY_REFERENCE: + if (transObjectId == null) { + throw new KettleException(BaseMessages.getString(PKG, + "JobTrans.Exception.ReferencedTransformationIdIsNull")); + } - // Set the result rows too, if any ... 
- if ( !Const.isEmpty( newResult.getRows() ) ) { - result.setRows( newResult.getRows() ); + if (rep != null) { + // Load the last revision + // + transMeta = rep.loadTransformation(transObjectId, null); + } + break; + default: + throw new KettleException("The specified object location specification method '" + + specificationMethod + "' is not yet supported in this job entry."); } - if ( setLogfile ) { - ResultFile resultFile = - new ResultFile( - ResultFile.FILE_TYPE_LOG, KettleVFS.getFileObject( realLogFilename, this ), parentJob - .getJobname(), toString() - ); - result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); + if (transMeta != null) { + // copy parent variables to this loaded variable space. + // + transMeta.copyVariablesFrom(this); + + // Pass repository and metastore references + // + transMeta.setRepository(rep); + transMeta.setMetaStore(metaStore); } - } catch ( KettleException e ) { - logError( BaseMessages.getString( PKG, "JobTrans.Error.UnablePrepareExec" ), e ); - result.setNrErrors( 1 ); - } + return transMeta; + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, "JobTrans.Exception.MetaDataLoad"), e); } - } catch ( Exception e ) { + } - logError( BaseMessages.getString( PKG, "JobTrans.ErrorUnableOpenTrans", e.getMessage() ) ); - logError( Const.getStackTracker( e ) ); - result.setNrErrors( 1 ); - } - iteration++; + public boolean evaluates() { + return true; } - if ( setLogfile ) { - if ( logChannelFileWriter != null ) { - logChannelFileWriter.stopLogging(); + public boolean isUnconditional() { + return true; + } - ResultFile resultFile = - new ResultFile( - ResultFile.FILE_TYPE_LOG, logChannelFileWriter.getLogFile(), parentJob.getJobname(), getName() ); - result.getResultFiles().put( resultFile.getFile().toString(), resultFile ); + public List getSQLStatements(Repository repository, IMetaStore metaStore, VariableSpace space) throws KettleException { + this.copyVariablesFrom(space); + TransMeta transMeta = getTransMeta(repository, metaStore, this); - // See if anything went wrong during file writing... - // - if ( logChannelFileWriter.getException() != null ) { - logError( "Unable to open log file [" + getLogFilename() + "] : " ); - logError( Const.getStackTracker( logChannelFileWriter.getException() ) ); - result.setNrErrors( 1 ); - result.setResult( false ); - return result; + return transMeta.getSQLStatements(); + } + + /** + * @return Returns the directoryPath. + */ + public String getDirectoryPath() { + return directoryPath; + } + + /** + * @param directoryPath The directoryPath to set. 
+ */ + public void setDirectoryPath(String directoryPath) { + this.directoryPath = directoryPath; + } + + /** + * @return the clustering + */ + public boolean isClustering() { + return clustering; + } + + /** + * @param clustering the clustering to set + */ + public void setClustering(boolean clustering) { + this.clustering = clustering; + } + + public void check(List remarks, JobMeta jobMeta, VariableSpace space, + Repository repository, IMetaStore metaStore) { + if (setLogfile) { + JobEntryValidatorUtils.andValidator().validate(this, "logfile", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notBlankValidator())); + } + if (!Const.isEmpty(filename)) { + JobEntryValidatorUtils.andValidator().validate(this, "filename", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notBlankValidator())); + } else { + JobEntryValidatorUtils.andValidator().validate(this, "transname", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notBlankValidator())); + JobEntryValidatorUtils.andValidator().validate(this, "directory", remarks, AndValidator.putValidators(JobEntryValidatorUtils.notNullValidator())); } - } } - if ( result.getNrErrors() == 0 ) { - result.setResult( true ); - } else { - result.setResult( false ); + public List getResourceDependencies(JobMeta jobMeta) { + List references = super.getResourceDependencies(jobMeta); + if (!Const.isEmpty(filename)) { + // During this phase, the variable space hasn't been initialized yet - it seems + // to happen during the execute. As such, we need to use the job meta's resolution + // of the variables. + String realFileName = jobMeta.environmentSubstitute(filename); + ResourceReference reference = new ResourceReference(this); + reference.getEntries().add(new ResourceEntry(realFileName, ResourceType.ACTIONFILE)); + references.add(reference); + } + return references; } - return result; - } - - @Deprecated - public TransMeta getTransMeta( Repository rep, VariableSpace space ) throws KettleException { - return getTransMeta( rep, null, space ); - } - - public TransMeta getTransMeta( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { - try { - TransMeta transMeta = null; - CurrentDirectoryResolver r = new CurrentDirectoryResolver(); - VariableSpace tmpSpace = r.resolveCurrentDirectory( - specificationMethod, space, rep, parentJob, getFilename() ); - switch ( specificationMethod ) { - case FILENAME: - String realFilename = tmpSpace.environmentSubstitute( getFilename() ); - if ( rep != null ) { - realFilename = r.normalizeSlashes( realFilename ); - // need to try to load from the repository - try { - String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); - String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1 ); - RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); - transMeta = rep.loadTransformation( tmpFilename, dir, null, true, null ); - } catch ( KettleException ke ) { - // try without extension - if ( realFilename.endsWith( Const.STRING_TRANS_DEFAULT_EXT ) ) { - try { - String tmpFilename = realFilename.substring( realFilename.lastIndexOf( "/" ) + 1, - realFilename.indexOf( "." 
+ Const.STRING_TRANS_DEFAULT_EXT ) ); - String dirStr = realFilename.substring( 0, realFilename.lastIndexOf( "/" ) ); - RepositoryDirectoryInterface dir = rep.findDirectory( dirStr ); - transMeta = rep.loadTransformation( tmpFilename, dir, null, true, null ); - } catch ( KettleException ke2 ) { - // fall back to try loading from file system (transMeta is going to be null) - } - } - } - } - if ( transMeta == null ) { - logBasic( "Loading transformation from XML file [" + realFilename + "]" ); - transMeta = new TransMeta( realFilename, metaStore, null, true, this, null ); - } - break; - case REPOSITORY_BY_NAME: - String transname = tmpSpace.environmentSubstitute( getTransname() ); - String realDirectory = tmpSpace.environmentSubstitute( getDirectory() ); - - logBasic( BaseMessages.getString( PKG, "JobTrans.Log.LoadingTransRepDirec", transname, realDirectory ) ); - - if ( rep != null ) { - // - // It only makes sense to try to load from the repository when the - // repository is also filled in. - // - // It reads last the last revision from the repository. - // - realDirectory = r.normalizeSlashes( realDirectory ); + /** + * We're going to load the transformation meta data referenced here. Then we're going to give it a new filename, + * modify that filename in this entries. The parent caller will have made a copy of it, so it should be OK to do so. + *

+ * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied + * resource naming interface allows the object to name appropriately without worrying about those parts of the + * implementation specific details. + * + * @param space The variable space to resolve (environment) variables with. + * @param definitions The map containing the filenames and content + * @param namingInterface The resource naming interface allows the object to be named appropriately + * @param repository The repository to load resources from + * @param metaStore the metaStore to load external metadata from + * @return The filename for this object. (also contained in the definitions map) + * @throws KettleException in case something goes wrong during the export + */ + public String exportResources(VariableSpace space, Map definitions, + ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore) throws KettleException { + // Try to load the transformation from repository or file. + // Modify this recursively too... + // + // AGAIN: there is no need to clone this job entry because the caller is responsible for this. + // + // First load the transformation metadata... + // + copyVariablesFrom(space); + TransMeta transMeta = getTransMeta(repository, space); - RepositoryDirectoryInterface repositoryDirectory = rep.findDirectory( realDirectory ); - transMeta = rep.loadTransformation( transname, repositoryDirectory, null, true, null ); - } else { - // rep is null, let's try loading by filename - try { - transMeta = new TransMeta( realDirectory + "/" + transname, metaStore, null, true, this, null ); - } catch ( KettleException ke ) { - try { - // add .ktr extension and try again - transMeta = new TransMeta( realDirectory + "/" + transname + "." + Const.STRING_TRANS_DEFAULT_EXT, - metaStore, null, true, this, null ); - } catch ( KettleException ke2 ) { - throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.NoRepDefined" ), ke2 ); - } - } - } - break; - case REPOSITORY_BY_REFERENCE: - if ( transObjectId == null ) { - throw new KettleException( BaseMessages.getString( PKG, - "JobTrans.Exception.ReferencedTransformationIdIsNull" ) ); - } - - if ( rep != null ) { - // Load the last revision - // - transMeta = rep.loadTransformation( transObjectId, null ); - } - break; - default: - throw new KettleException( "The specified object location specification method '" - + specificationMethod + "' is not yet supported in this job entry." ); - } - - if ( transMeta != null ) { - // copy parent variables to this loaded variable space. + // Also go down into the transformation and export the files there. (mapping recursively down) // - transMeta.copyVariablesFrom( this ); + String proposedNewFilename = + transMeta.exportResources(transMeta, definitions, namingInterface, repository, metaStore); - // Pass repository and metastore references + // To get a relative path to it, we inject ${Internal.Job.Filename.Directory} // - transMeta.setRepository( rep ); - transMeta.setMetaStore( metaStore ); - } + String newFilename = "${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}/" + proposedNewFilename; - return transMeta; - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, "JobTrans.Exception.MetaDataLoad" ), e ); + // Set the correct filename inside the XML. + // + transMeta.setFilename(newFilename); + + // exports always reside in the root directory, in case we want to turn this into a file repository... 
+ // + transMeta.setRepositoryDirectory(new RepositoryDirectory()); + + // export to filename ALWAYS (this allows the exported XML to be executed remotely) + // + setSpecificationMethod(ObjectLocationSpecificationMethod.FILENAME); + + // change it in the job entry + // + filename = newFilename; + + return proposedNewFilename; } - } - - public boolean evaluates() { - return true; - } - - public boolean isUnconditional() { - return true; - } - - public List getSQLStatements( Repository repository, IMetaStore metaStore, VariableSpace space ) throws KettleException { - this.copyVariablesFrom( space ); - TransMeta transMeta = getTransMeta( repository, metaStore, this ); - - return transMeta.getSQLStatements(); - } - - /** - * @return Returns the directoryPath. - */ - public String getDirectoryPath() { - return directoryPath; - } - - /** - * @param directoryPath The directoryPath to set. - */ - public void setDirectoryPath( String directoryPath ) { - this.directoryPath = directoryPath; - } - - /** - * @return the clustering - */ - public boolean isClustering() { - return clustering; - } - - /** - * @param clustering the clustering to set - */ - public void setClustering( boolean clustering ) { - this.clustering = clustering; - } - - public void check( List remarks, JobMeta jobMeta, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - if ( setLogfile ) { - JobEntryValidatorUtils.andValidator().validate( this, "logfile", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); + + protected String getLogfile() { + return logfile; } - if ( !Const.isEmpty( filename ) ) { - JobEntryValidatorUtils.andValidator().validate( this, "filename", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); - } else { - JobEntryValidatorUtils.andValidator().validate( this, "transname", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notBlankValidator() ) ); - JobEntryValidatorUtils.andValidator().validate( this, "directory", remarks, AndValidator.putValidators( JobEntryValidatorUtils.notNullValidator() ) ); + + /** + * @return the remote slave server name + */ + public String getRemoteSlaveServerName() { + return remoteSlaveServerName; } - } - - public List getResourceDependencies( JobMeta jobMeta ) { - List references = super.getResourceDependencies( jobMeta ); - if ( !Const.isEmpty( filename ) ) { - // During this phase, the variable space hasn't been initialized yet - it seems - // to happen during the execute. As such, we need to use the job meta's resolution - // of the variables. - String realFileName = jobMeta.environmentSubstitute( filename ); - ResourceReference reference = new ResourceReference( this ); - reference.getEntries().add( new ResourceEntry( realFileName, ResourceType.ACTIONFILE ) ); - references.add( reference ); + + /** + * @param remoteSlaveServerName the remote slave server name to set + */ + public void setRemoteSlaveServerName(String remoteSlaveServerName) { + this.remoteSlaveServerName = remoteSlaveServerName; } - return references; - } - - /** - * We're going to load the transformation meta data referenced here. Then we're going to give it a new filename, - * modify that filename in this entries. The parent caller will have made a copy of it, so it should be OK to do so. - *

- * Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied - * resource naming interface allows the object to name appropriately without worrying about those parts of the - * implementation specific details. - * - * @param space The variable space to resolve (environment) variables with. - * @param definitions The map containing the filenames and content - * @param namingInterface The resource naming interface allows the object to be named appropriately - * @param repository The repository to load resources from - * @param metaStore the metaStore to load external metadata from - * @return The filename for this object. (also contained in the definitions map) - * @throws KettleException in case something goes wrong during the export - */ - public String exportResources( VariableSpace space, Map definitions, - ResourceNamingInterface namingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { - // Try to load the transformation from repository or file. - // Modify this recursively too... - // - // AGAIN: there is no need to clone this job entry because the caller is responsible for this. - // - // First load the transformation metadata... - // - copyVariablesFrom( space ); - TransMeta transMeta = getTransMeta( repository, space ); - // Also go down into the transformation and export the files there. (mapping recursively down) - // - String proposedNewFilename = - transMeta.exportResources( transMeta, definitions, namingInterface, repository, metaStore ); + /** + * @return the waitingToFinish + */ + public boolean isWaitingToFinish() { + return waitingToFinish; + } - // To get a relative path to it, we inject ${Internal.Job.Filename.Directory} - // - String newFilename = "${" + Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY + "}/" + proposedNewFilename; + /** + * @param waitingToFinish the waitingToFinish to set + */ + public void setWaitingToFinish(boolean waitingToFinish) { + this.waitingToFinish = waitingToFinish; + } - // Set the correct filename inside the XML. - // - transMeta.setFilename( newFilename ); + /** + * @return the followingAbortRemotely + */ + public boolean isFollowingAbortRemotely() { + return followingAbortRemotely; + } - // exports always reside in the root directory, in case we want to turn this into a file repository... 
- // - transMeta.setRepositoryDirectory( new RepositoryDirectory() ); + /** + * @param followingAbortRemotely the followingAbortRemotely to set + */ + public void setFollowingAbortRemotely(boolean followingAbortRemotely) { + this.followingAbortRemotely = followingAbortRemotely; + } - // export to filename ALWAYS (this allows the exported XML to be executed remotely) - // - setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); + public boolean isLoggingRemoteWork() { + return loggingRemoteWork; + } - // change it in the job entry - // - filename = newFilename; - - return proposedNewFilename; - } - - protected String getLogfile() { - return logfile; - } - - /** - * @return the remote slave server name - */ - public String getRemoteSlaveServerName() { - return remoteSlaveServerName; - } - - /** - * @param remoteSlaveServerName the remote slave server name to set - */ - public void setRemoteSlaveServerName( String remoteSlaveServerName ) { - this.remoteSlaveServerName = remoteSlaveServerName; - } - - /** - * @return the waitingToFinish - */ - public boolean isWaitingToFinish() { - return waitingToFinish; - } - - /** - * @param waitingToFinish the waitingToFinish to set - */ - public void setWaitingToFinish( boolean waitingToFinish ) { - this.waitingToFinish = waitingToFinish; - } - - /** - * @return the followingAbortRemotely - */ - public boolean isFollowingAbortRemotely() { - return followingAbortRemotely; - } - - /** - * @param followingAbortRemotely the followingAbortRemotely to set - */ - public void setFollowingAbortRemotely( boolean followingAbortRemotely ) { - this.followingAbortRemotely = followingAbortRemotely; - } - - public boolean isLoggingRemoteWork() { - return loggingRemoteWork; - } - - public void setLoggingRemoteWork( boolean loggingRemoteWork ) { - this.loggingRemoteWork = loggingRemoteWork; - } - - /** - * @return the passingAllParameters - */ - public boolean isPassingAllParameters() { - return passingAllParameters; - } - - /** - * @param passingAllParameters the passingAllParameters to set - */ - public void setPassingAllParameters( boolean passingAllParameters ) { - this.passingAllParameters = passingAllParameters; - } - - public Trans getTrans() { - return trans; - } - - /** - * @return the transObjectId - */ - public ObjectId getTransObjectId() { - return transObjectId; - } - - /** - * @param transObjectId the transObjectId to set - */ - public void setTransObjectId( ObjectId transObjectId ) { - this.transObjectId = transObjectId; - } - - /** - * @return the specificationMethod - */ - public ObjectLocationSpecificationMethod getSpecificationMethod() { - return specificationMethod; - } - - /** - * @param specificationMethod the specificationMethod to set - */ - public void setSpecificationMethod( ObjectLocationSpecificationMethod specificationMethod ) { - this.specificationMethod = specificationMethod; - } - - public boolean hasRepositoryReferences() { - return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; - } - - /** - * Look up the references after import - * - * @param repository the repository to reference. - */ - public void lookupRepositoryReferences( Repository repository ) throws KettleException { - // The correct reference is stored in the trans name and directory attributes... 
- // - RepositoryDirectoryInterface repositoryDirectoryInterface = - RepositoryImportLocation.getRepositoryImportLocation().findDirectory( directory ); - transObjectId = repository.getTransformationID( transname, repositoryDirectoryInterface ); - } - - /** - * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... - */ - public String[] getReferencedObjectDescriptions() { - return new String[] { BaseMessages.getString( PKG, "JobEntryTrans.ReferencedObject.Description" ), }; - } - - private boolean isTransformationDefined() { - return !Const.isEmpty( filename ) - || transObjectId != null || ( !Const.isEmpty( this.directory ) && !Const.isEmpty( transname ) ); - } - - public boolean[] isReferencedObjectEnabled() { - return new boolean[] { isTransformationDefined(), }; - } - - /** - * Load the referenced object - * - * @param index the referenced object index to load (in case there are multiple references) - * @param rep the repository - * @param metaStore metaStore - * @param space the variable space to use - * @return the referenced object once loaded - * @throws KettleException - */ - public Object loadReferencedObject( int index, Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException { - return getTransMeta( rep, metaStore, space ); - } + public void setLoggingRemoteWork(boolean loggingRemoteWork) { + this.loggingRemoteWork = loggingRemoteWork; + } + + /** + * @return the passingAllParameters + */ + public boolean isPassingAllParameters() { + return passingAllParameters; + } + + /** + * @param passingAllParameters the passingAllParameters to set + */ + public void setPassingAllParameters(boolean passingAllParameters) { + this.passingAllParameters = passingAllParameters; + } + + public Trans getTrans() { + return trans; + } + + /** + * @return the transObjectId + */ + public ObjectId getTransObjectId() { + return transObjectId; + } + + /** + * @param transObjectId the transObjectId to set + */ + public void setTransObjectId(ObjectId transObjectId) { + this.transObjectId = transObjectId; + } + + /** + * @return the specificationMethod + */ + public ObjectLocationSpecificationMethod getSpecificationMethod() { + return specificationMethod; + } + + /** + * @param specificationMethod the specificationMethod to set + */ + public void setSpecificationMethod(ObjectLocationSpecificationMethod specificationMethod) { + this.specificationMethod = specificationMethod; + } + + public boolean hasRepositoryReferences() { + return specificationMethod == ObjectLocationSpecificationMethod.REPOSITORY_BY_REFERENCE; + } + + /** + * Look up the references after import + * + * @param repository the repository to reference. + */ + public void lookupRepositoryReferences(Repository repository) throws KettleException { + // The correct reference is stored in the trans name and directory attributes... + // + RepositoryDirectoryInterface repositoryDirectoryInterface = + RepositoryImportLocation.getRepositoryImportLocation().findDirectory(directory); + transObjectId = repository.getTransformationID(transname, repositoryDirectoryInterface); + } + + /** + * @return The objects referenced in the step, like a a transformation, a job, a mapper, a reducer, a combiner, ... 
+ */ + public String[] getReferencedObjectDescriptions() { + return new String[]{BaseMessages.getString(PKG, "JobEntryTrans.ReferencedObject.Description"),}; + } + + private boolean isTransformationDefined() { + return !Const.isEmpty(filename) + || transObjectId != null || (!Const.isEmpty(this.directory) && !Const.isEmpty(transname)); + } + + public boolean[] isReferencedObjectEnabled() { + return new boolean[]{isTransformationDefined(),}; + } + + /** + * Load the referenced object + * + * @param index the referenced object index to load (in case there are multiple references) + * @param rep the repository + * @param metaStore metaStore + * @param space the variable space to use + * @return the referenced object once loaded + * @throws KettleException + */ + public Object loadReferencedObject(int index, Repository rep, IMetaStore metaStore, VariableSpace space) throws KettleException { + return getTransMeta(rep, metaStore, space); + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java b/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java index 5f68579..c05dd3d 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java @@ -18,12 +18,7 @@ import org.apache.commons.lang.StringUtils; import org.pentaho.di.core.exception.KettleException; -import org.pentaho.di.repository.ObjectId; -import org.pentaho.di.repository.RepositoryDirectory; -import org.pentaho.di.repository.RepositoryDirectoryInterface; -import org.pentaho.di.repository.RepositoryElementMetaInterface; -import org.pentaho.di.repository.RepositoryObjectType; -import org.pentaho.di.repository.StringObjectId; +import org.pentaho.di.repository.*; import org.pentaho.di.repository.pur.model.EERepositoryObject; import org.pentaho.di.repository.pur.model.RepositoryLock; import org.pentaho.di.ui.repository.pur.services.ILockService; @@ -51,298 +46,321 @@ */ public class LazyUnifiedRepositoryDirectory extends RepositoryDirectory { - private RepositoryFile self; - private IUnifiedRepository repository; - private RepositoryServiceRegistry registry; - private List subdirectories; - private List fileChildren; - private RepositoryDirectoryInterface parent; - private Logger logger = LoggerFactory.getLogger( getClass() ); - - public LazyUnifiedRepositoryDirectory( RepositoryFile self, RepositoryDirectoryInterface parent, - IUnifiedRepository repository, RepositoryServiceRegistry registry ) { - this.self = self; - this.parent = parent; - this.repository = repository; - this.registry = registry; - } - - private String getParentPath( String absolutePath ) { - int parentEndIndex; - if ( absolutePath.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - parentEndIndex = absolutePath.lastIndexOf( RepositoryDirectory.DIRECTORY_SEPARATOR, absolutePath.length() - 2 ); - } else { - parentEndIndex = absolutePath.lastIndexOf( RepositoryDirectory.DIRECTORY_SEPARATOR ); + private RepositoryFile self; + private IUnifiedRepository repository; + private RepositoryServiceRegistry registry; + private List subdirectories; + private List fileChildren; + private RepositoryDirectoryInterface parent; + private Logger logger = LoggerFactory.getLogger(getClass()); + + public LazyUnifiedRepositoryDirectory(RepositoryFile self, RepositoryDirectoryInterface parent, + IUnifiedRepository repository, RepositoryServiceRegistry 
registry) { + this.self = self; + this.parent = parent; + this.repository = repository; + this.registry = registry; } - if ( parentEndIndex < 0 ) { - return null; - } - return absolutePath.substring( 0, parentEndIndex ); - } - @Override public RepositoryDirectory findDirectory( String path ) { - if ( StringUtils.isEmpty( path ) ) { - return null; - } - String absolutePath; - if ( path.startsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - if ( self.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - absolutePath = self.getPath() + path.substring( 1 ); - } else { - absolutePath = self.getPath() + path; - } - } else { - if ( self.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - absolutePath = self.getPath() + path; - } else { - absolutePath = self.getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + path; - } + private String getParentPath(String absolutePath) { + int parentEndIndex; + if (absolutePath.endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + parentEndIndex = absolutePath.lastIndexOf(RepositoryDirectory.DIRECTORY_SEPARATOR, absolutePath.length() - 2); + } else { + parentEndIndex = absolutePath.lastIndexOf(RepositoryDirectory.DIRECTORY_SEPARATOR); + } + if (parentEndIndex < 0) { + return null; + } + return absolutePath.substring(0, parentEndIndex); } - RepositoryFile file = repository.getFile( absolutePath ); - if ( file == null || !file.isFolder() ) { - return null; - } - if ( isRoot() && RepositoryDirectory.DIRECTORY_SEPARATOR.equals( absolutePath ) ) { - return this; - } + @Override + public RepositoryDirectory findDirectory(String path) { + if (StringUtils.isEmpty(path)) { + return null; + } + String absolutePath; + if (path.startsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + if (self.getPath().endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + absolutePath = self.getPath() + path.substring(1); + } else { + absolutePath = self.getPath() + path; + } + } else { + if (self.getPath().endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + absolutePath = self.getPath() + path; + } else { + absolutePath = self.getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + path; + } + } - // Verifies if this is the parent directory of file and if so passes this as parent argument - String parentPath = getParentPath( absolutePath ); - if ( self.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - if ( parentPath.equals( self.getPath().substring( 0, self.getPath().length() - 1 ) ) ) { - return new LazyUnifiedRepositoryDirectory( file, this, repository, registry ); - } - } else { - if ( parentPath.equals( self.getPath() ) ) { - return new LazyUnifiedRepositoryDirectory( file, this, repository, registry ); - } - } + RepositoryFile file = repository.getFile(absolutePath); + if (file == null || !file.isFolder()) { + return null; + } + if (isRoot() && RepositoryDirectory.DIRECTORY_SEPARATOR.equals(absolutePath)) { + return this; + } + + // Verifies if this is the parent directory of file and if so passes this as parent argument + String parentPath = getParentPath(absolutePath); + if (self.getPath().endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + if (parentPath.equals(self.getPath().substring(0, self.getPath().length() - 1))) { + return new LazyUnifiedRepositoryDirectory(file, this, repository, registry); + } + } else { + if (parentPath.equals(self.getPath())) { + return new LazyUnifiedRepositoryDirectory(file, this, repository, registry); + } + } - return new LazyUnifiedRepositoryDirectory( file, findDirectory( parentPath ), 
repository, registry ); + return new LazyUnifiedRepositoryDirectory(file, findDirectory(parentPath), repository, registry); - } + } - @Override public RepositoryDirectory findChild( String name ) { - return findDirectory( name ); - } + @Override + public RepositoryDirectory findChild(String name) { + return findDirectory(name); + } - @Override public RepositoryDirectory findDirectory( String[] path ) { - return findDirectory( StringUtils.join( path, "/" ) ); - } + @Override + public RepositoryDirectory findDirectory(String[] path) { + return findDirectory(StringUtils.join(path, "/")); + } - @Override public List getChildren() { - if ( subdirectories == null ) { - subdirectories = new ArrayList<>(); - synchronized ( subdirectories ) { - List children = getAllURChildrenFiles(); - for ( RepositoryFile child : children ) { - LazyUnifiedRepositoryDirectory dir = new LazyUnifiedRepositoryDirectory( child, this, repository, registry ); - dir.setObjectId( new StringObjectId( child.getId().toString() ) ); - this.addSubdirectory( dir ); + @Override + public List getChildren() { + if (subdirectories == null) { + subdirectories = new ArrayList<>(); + synchronized (subdirectories) { + List children = getAllURChildrenFiles(); + for (RepositoryFile child : children) { + LazyUnifiedRepositoryDirectory dir = new LazyUnifiedRepositoryDirectory(child, this, repository, registry); + dir.setObjectId(new StringObjectId(child.getId().toString())); + this.addSubdirectory(dir); + } + } } - } + return subdirectories; } - return subdirectories; - } - - @Override public List getRepositoryObjects() { - if ( fileChildren == null ) { - fileChildren = new ArrayList(); - synchronized ( fileChildren ) { + @Override + public List getRepositoryObjects() { + if (fileChildren == null) { + + fileChildren = new ArrayList(); + synchronized (fileChildren) { + + UnifiedRepositoryLockService lockService = + (UnifiedRepositoryLockService) registry.getService(ILockService.class); + + RepositoryFileTree tree = repository.getTree(new RepositoryRequest(this.self.getPath(), true, 1, null)); + + for (RepositoryFileTree tchild : tree.getChildren()) { + RepositoryFile child = tchild.getFile(); + + RepositoryLock lock = null; + try { + lock = lockService.getLock(child); + RepositoryObjectType objectType = getObjectType(child.getName()); + EERepositoryObject repositoryObject = + new EERepositoryObject(child, this, null, objectType, null, lock, false); + + repositoryObject.setVersioningEnabled(tchild.getVersioningEnabled()); + repositoryObject.setVersionCommentEnabled(tchild.getVersionCommentEnabled()); + fileChildren.add(repositoryObject); + } catch (KettleException e) { + logger.error("Error converting Unified Repository file to PDI RepositoryObject: " + child.getPath() + + ". 
File will be skipped", e); + } + } + } + } + return fileChildren; - UnifiedRepositoryLockService lockService = - (UnifiedRepositoryLockService) registry.getService( ILockService.class ); + } - RepositoryFileTree tree = repository.getTree( new RepositoryRequest( this.self.getPath(), true, 1, null ) ); + @Override + public void setRepositoryObjects(List list) { + synchronized (fileChildren) { + fileChildren.clear(); + fileChildren.addAll(list); + } + } - for ( RepositoryFileTree tchild : tree.getChildren() ) { - RepositoryFile child = tchild.getFile(); + @Override + public boolean isVisible() { + return !isRoot() && !self.isHidden(); + } - RepositoryLock lock = null; - try { - lock = lockService.getLock( child ); - RepositoryObjectType objectType = getObjectType( child.getName() ); - EERepositoryObject repositoryObject = - new EERepositoryObject( child, this, null, objectType, null, lock, false ); - repositoryObject.setVersioningEnabled( tchild.getVersioningEnabled() ); - repositoryObject.setVersionCommentEnabled( tchild.getVersionCommentEnabled() ); - fileChildren.add( repositoryObject ); - } catch ( KettleException e ) { - logger.error( "Error converting Unified Repository file to PDI RepositoryObject: " + child.getPath() - + ". File will be skipped", e ); - } - } - } + @Override + public int getNrSubdirectories() { + List childrenFiles = getAllURChildrenFiles(); + return childrenFiles.size(); } - return fileChildren; - } + @Override + public RepositoryDirectory getSubdirectory(int i) { + if (subdirectories == null) { + getChildren(); + } - @Override public void setRepositoryObjects( List list ) { - synchronized ( fileChildren ) { - fileChildren.clear(); - fileChildren.addAll( list ); + if (i >= subdirectories.size() || i < 0) { + return null; + } + RepositoryDirectoryInterface directoryInterface = subdirectories.get(i); + // Have to cast due to bad interface + if (directoryInterface instanceof RepositoryDirectory) { + return (RepositoryDirectory) directoryInterface; + } + throw new IllegalStateException( + "Bad Repository interface expects RepositoryDirectoryInterface to be an instance of" + + " RepositoryDirectory. This class is not: " + directoryInterface.getClass().getName()); } - } - @Override public boolean isVisible() { - return !isRoot() && !self.isHidden(); - } + private List getAllURChildrenFiles() { + RepositoryRequest repositoryRequest = new RepositoryRequest(); + repositoryRequest.setShowHidden(true); + repositoryRequest.setTypes(RepositoryRequest.FILES_TYPE_FILTER.FOLDERS); + repositoryRequest.setPath(this.self.getId().toString()); + List children = repository.getChildren(repositoryRequest); - @Override public int getNrSubdirectories() { - List childrenFiles = getAllURChildrenFiles(); - return childrenFiles.size(); - } + // Special case: /etc should not be returned from a directory listing. + RepositoryFile etcFile = null; + if (this.isRoot()) { + etcFile = repository.getFile(ClientRepositoryPaths.getEtcFolderPath()); + } - @Override public RepositoryDirectory getSubdirectory( int i ) { - if ( subdirectories == null ) { - getChildren(); + // Filter for Folders only doesn't appear to work + Iterator iterator = children.iterator(); + while (iterator.hasNext()) { + RepositoryFile next = iterator.next(); + if (!next.isFolder()) { + iterator.remove(); + } + + // Special case: /etc should not be returned from a directory listing. 
+ if (this.isRoot() && next.equals(etcFile)) { + iterator.remove(); + } + } + return children; } - if ( i >= subdirectories.size() || i < 0 ) { - return null; - } - RepositoryDirectoryInterface directoryInterface = subdirectories.get( i ); - // Have to cast due to bad interface - if ( directoryInterface instanceof RepositoryDirectory ) { - return (RepositoryDirectory) directoryInterface; - } - throw new IllegalStateException( - "Bad Repository interface expects RepositoryDirectoryInterface to be an instance of" - + " RepositoryDirectory. This class is not: " + directoryInterface.getClass().getName() ); - } - - private List getAllURChildrenFiles() { - RepositoryRequest repositoryRequest = new RepositoryRequest(); - repositoryRequest.setShowHidden( true ); - repositoryRequest.setTypes( RepositoryRequest.FILES_TYPE_FILTER.FOLDERS ); - repositoryRequest.setPath( this.self.getId().toString() ); - List children = repository.getChildren( repositoryRequest ); - - - // Special case: /etc should not be returned from a directory listing. - RepositoryFile etcFile = null; - if ( this.isRoot() ) { - etcFile = repository.getFile( ClientRepositoryPaths.getEtcFolderPath() ); + @Override + public void clear() { + if (this.fileChildren != null) { + synchronized (fileChildren) { + this.fileChildren.clear(); + } + } + if (this.subdirectories != null) { + synchronized (subdirectories) { + this.subdirectories.clear(); + } + } } - // Filter for Folders only doesn't appear to work - Iterator iterator = children.iterator(); - while ( iterator.hasNext() ) { - RepositoryFile next = iterator.next(); - if ( !next.isFolder() ) { - iterator.remove(); - } - - // Special case: /etc should not be returned from a directory listing. - if ( this.isRoot() && next.equals( etcFile ) ) { - iterator.remove(); - } - } - return children; - } - - @Override public void clear() { - if ( this.fileChildren != null ) { - synchronized ( fileChildren ) { - this.fileChildren.clear(); - } - } - if ( this.subdirectories != null ) { - synchronized ( subdirectories ) { - this.subdirectories.clear(); - } + @Override + public void addSubdirectory(RepositoryDirectoryInterface repositoryDirectoryInterface) { + if (subdirectories == null) { + subdirectories = new ArrayList<>(); + } + synchronized (subdirectories) { + this.subdirectories.add(repositoryDirectoryInterface); + } } - } - @Override public void addSubdirectory( RepositoryDirectoryInterface repositoryDirectoryInterface ) { - if ( subdirectories == null ) { - subdirectories = new ArrayList<>(); + @Override + public String getName() { + return self.getName(); } - synchronized ( subdirectories ) { - this.subdirectories.add( repositoryDirectoryInterface ); - } - } - - @Override public String getName() { - return self.getName(); - } - @Override public String getPath() { - return self.getPath(); - } + @Override + public String getPath() { + return self.getPath(); + } - @Override public ObjectId getObjectId() { - return new StringObjectId( self.getId().toString() ); - } + @Override + public ObjectId getObjectId() { + return new StringObjectId(self.getId().toString()); + } - @Override public void setChildren( List list ) { - if ( subdirectories == null ) { - subdirectories = new ArrayList<>(); + @Override + public void setChildren(List list) { + if (subdirectories == null) { + subdirectories = new ArrayList<>(); + } + if (!subdirectories.equals(list)) { + synchronized (subdirectories) { + subdirectories.clear(); + subdirectories.addAll(list); + } + } } - if ( !subdirectories.equals( list ) ) { - 
synchronized ( subdirectories ) { - subdirectories.clear(); - subdirectories.addAll( list ); - } + + @Override + public String[] getPathArray() { + return getPath().split(RepositoryDirectory.DIRECTORY_SEPARATOR); } - } - @Override public String[] getPathArray() { - return getPath().split( RepositoryDirectory.DIRECTORY_SEPARATOR ); - } + @Override + public ObjectId[] getDirectoryIDs() { + List children = this.getAllURChildrenFiles(); + ObjectId[] objectIds = new ObjectId[children.size()]; + for (int i = 0; i < children.size(); i++) { + objectIds[i] = new StringObjectId(children.get(i).getId().toString()); + } - @Override public ObjectId[] getDirectoryIDs() { - List children = this.getAllURChildrenFiles(); - ObjectId[] objectIds = new ObjectId[ children.size() ]; - for ( int i = 0; i < children.size(); i++ ) { - objectIds[ i ] = new StringObjectId( children.get( i ).getId().toString() ); + return objectIds; } - return objectIds; - } - - @Override public boolean isRoot() { - return parent == null; - } + @Override + public boolean isRoot() { + return parent == null; + } - @Override public RepositoryDirectoryInterface findRoot() { - RepositoryDirectoryInterface current = this; - RepositoryDirectoryInterface parent = null; - while ( ( parent = current.getParent() ) != null ) { - current = parent; + @Override + public RepositoryDirectoryInterface findRoot() { + RepositoryDirectoryInterface current = this; + RepositoryDirectoryInterface parent = null; + while ((parent = current.getParent()) != null) { + current = parent; + } + return current; } - return current; - } - @Override public void setParent( RepositoryDirectoryInterface repositoryDirectoryInterface ) { - this.parent = repositoryDirectoryInterface; - } + @Override + public void setParent(RepositoryDirectoryInterface repositoryDirectoryInterface) { + this.parent = repositoryDirectoryInterface; + } - @Override public RepositoryDirectoryInterface getParent() { - return parent; - } + @Override + public RepositoryDirectoryInterface getParent() { + return parent; + } - @Override public void setObjectId( ObjectId objectId ) { - // ignore - } + @Override + public void setObjectId(ObjectId objectId) { + // ignore + } - @Override public void setName( String s ) { - // ignore - } + @Override + public void setName(String s) { + // ignore + } - @Override - public String getPathObjectCombination( String transName ) { - if ( isRoot() ) { - return getPath() + transName; - } else { - return getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + transName; + @Override + public String getPathObjectCombination(String transName) { + if (isRoot()) { + return getPath() + transName; + } else { + return getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + transName; + } } - } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java b/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java index 889e533..361392b 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceUtil.java @@ -22,12 +22,6 @@ package org.pentaho.di.resource; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.zip.ZipEntry; -import java.util.zip.ZipOutputStream; - import org.apache.commons.vfs2.FileObject; import org.pentaho.di.core.Const; import org.pentaho.di.core.exception.KettleException; @@ -38,148 +32,141 @@ import org.pentaho.di.trans.TransMeta; import org.pentaho.metastore.api.IMetaStore; +import 
java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + public class ResourceUtil { - private static Class PKG = ResourceUtil.class; // for i18n purposes, needed by Translator2!! - - /** - * Serializes the referenced resource export interface (Job, Transformation, Mapping, Step, Job Entry, etc) to a ZIP - * file. - * - * @param zipFilename - * The ZIP file to put the content in - * @param resourceExportInterface - * the interface to serialize - * @param space - * the space to use for variable replacement - * @param repository - * the repository to load objects from (or null if not used) - * @param metaStore - * the metaStore to load from - * @return The full VFS filename reference to the serialized export interface XML file in the ZIP archive. - * @throws KettleException - * in case anything goes wrong during serialization - */ - public static final TopLevelResource serializeResourceExportInterface( String zipFilename, - ResourceExportInterface resourceExportInterface, VariableSpace space, Repository repository, - IMetaStore metaStore ) throws KettleException { - return serializeResourceExportInterface( - zipFilename, resourceExportInterface, space, repository, metaStore, null, null ); - } - - /** - * Serializes the referenced resource export interface (Job, Transformation, Mapping, Step, Job Entry, etc) to a ZIP - * file. - * - * @param zipFilename - * The ZIP file to put the content in - * @param resourceExportInterface - * the interface to serialize - * @param space - * the space to use for variable replacement - * @param repository - * the repository to load objects from (or null if not used) - * @param injectXML - * The XML to inject into the resulting ZIP archive (optional, can be null) - * @param injectFilename - * The name of the file for the XML to inject in the ZIP archive (optional, can be null) - * @return The full VFS filename reference to the serialized export interface XML file in the ZIP archive. - * @throws KettleException - * in case anything goes wrong during serialization - */ - public static final TopLevelResource serializeResourceExportInterface( String zipFilename, - ResourceExportInterface resourceExportInterface, VariableSpace space, Repository repository, - IMetaStore metaStore, String injectXML, String injectFilename ) throws KettleException { - - ZipOutputStream out = null; - - try { - Map definitions = new HashMap(); - - // In case we want to add an extra pay-load to the exported ZIP file... - // - if ( injectXML != null ) { - ResourceDefinition resourceDefinition = new ResourceDefinition( injectFilename, injectXML ); - definitions.put( injectFilename, resourceDefinition ); - } - - ResourceNamingInterface namingInterface = new SequenceResourceNaming(); - - String topLevelResource = - resourceExportInterface.exportResources( space, definitions, namingInterface, repository, metaStore ); - - if ( topLevelResource != null && !definitions.isEmpty() ) { - - // Create the ZIP file... - // - FileObject fileObject = KettleVFS.getFileObject( zipFilename, space ); - - // Store the XML in the definitions in a ZIP file... 
- // - out = new ZipOutputStream( KettleVFS.getOutputStream( fileObject, false ) ); - - for ( String filename : definitions.keySet() ) { - ResourceDefinition resourceDefinition = definitions.get( filename ); - - ZipEntry zipEntry = new ZipEntry( resourceDefinition.getFilename() ); - - String comment = - BaseMessages.getString( - PKG, "ResourceUtil.SerializeResourceExportInterface.ZipEntryComment.OriginatingFile", filename, - Const.NVL( resourceDefinition.getOrigin(), "-" ) ); - zipEntry.setComment( comment ); - out.putNextEntry( zipEntry ); - - out.write( resourceDefinition.getContent().getBytes() ); - out.closeEntry(); - } - String zipURL = fileObject.getName().toString(); - return new TopLevelResource( topLevelResource, zipURL, "zip:" + zipURL + "!" + topLevelResource ); - } else { - throw new KettleException( BaseMessages.getString( PKG, "ResourceUtil.Exception.NoResourcesFoundToExport" ) ); - } - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( - PKG, "ResourceUtil.Exception.ErrorSerializingExportInterface", resourceExportInterface.toString() ), e ); - } finally { - if ( out != null ) { + private static Class PKG = ResourceUtil.class; // for i18n purposes, needed by Translator2!! + + /** + * Serializes the referenced resource export interface (Job, Transformation, Mapping, Step, Job Entry, etc) to a ZIP + * file. + * + * @param zipFilename The ZIP file to put the content in + * @param resourceExportInterface the interface to serialize + * @param space the space to use for variable replacement + * @param repository the repository to load objects from (or null if not used) + * @param metaStore the metaStore to load from + * @return The full VFS filename reference to the serialized export interface XML file in the ZIP archive. + * @throws KettleException in case anything goes wrong during serialization + */ + public static final TopLevelResource serializeResourceExportInterface(String zipFilename, + ResourceExportInterface resourceExportInterface, VariableSpace space, Repository repository, + IMetaStore metaStore) throws KettleException { + return serializeResourceExportInterface( + zipFilename, resourceExportInterface, space, repository, metaStore, null, null); + } + + /** + * Serializes the referenced resource export interface (Job, Transformation, Mapping, Step, Job Entry, etc) to a ZIP + * file. + * + * @param zipFilename The ZIP file to put the content in + * @param resourceExportInterface the interface to serialize + * @param space the space to use for variable replacement + * @param repository the repository to load objects from (or null if not used) + * @param injectXML The XML to inject into the resulting ZIP archive (optional, can be null) + * @param injectFilename The name of the file for the XML to inject in the ZIP archive (optional, can be null) + * @return The full VFS filename reference to the serialized export interface XML file in the ZIP archive. 
+ * @throws KettleException in case anything goes wrong during serialization + */ + public static final TopLevelResource serializeResourceExportInterface(String zipFilename, + ResourceExportInterface resourceExportInterface, VariableSpace space, Repository repository, + IMetaStore metaStore, String injectXML, String injectFilename) throws KettleException { + + ZipOutputStream out = null; + try { - out.close(); - } catch ( IOException e ) { - throw new KettleException( BaseMessages.getString( - PKG, "ResourceUtil.Exception.ErrorClosingZipStream", zipFilename ) ); + Map definitions = new HashMap(); + + // In case we want to add an extra pay-load to the exported ZIP file... + // + if (injectXML != null) { + ResourceDefinition resourceDefinition = new ResourceDefinition(injectFilename, injectXML); + definitions.put(injectFilename, resourceDefinition); + } + + ResourceNamingInterface namingInterface = new SequenceResourceNaming(); + + String topLevelResource = + resourceExportInterface.exportResources(space, definitions, namingInterface, repository, metaStore); + + if (topLevelResource != null && !definitions.isEmpty()) { + + // Create the ZIP file... + // + FileObject fileObject = KettleVFS.getFileObject(zipFilename, space); + + // Store the XML in the definitions in a ZIP file... + // + out = new ZipOutputStream(KettleVFS.getOutputStream(fileObject, false)); + + for (String filename : definitions.keySet()) { + ResourceDefinition resourceDefinition = definitions.get(filename); + + ZipEntry zipEntry = new ZipEntry(resourceDefinition.getFilename()); + + String comment = + BaseMessages.getString( + PKG, "ResourceUtil.SerializeResourceExportInterface.ZipEntryComment.OriginatingFile", filename, + Const.NVL(resourceDefinition.getOrigin(), "-")); + zipEntry.setComment(comment); + out.putNextEntry(zipEntry); + + out.write(resourceDefinition.getContent().getBytes()); + out.closeEntry(); + } + String zipURL = fileObject.getName().toString(); + return new TopLevelResource(topLevelResource, zipURL, "zip:" + zipURL + "!" 
+ topLevelResource); + } else { + throw new KettleException(BaseMessages.getString(PKG, "ResourceUtil.Exception.NoResourcesFoundToExport")); + } + } catch (Exception e) { + throw new KettleException(BaseMessages.getString( + PKG, "ResourceUtil.Exception.ErrorSerializingExportInterface", resourceExportInterface.toString()), e); + } finally { + if (out != null) { + try { + out.close(); + } catch (IOException e) { + throw new KettleException(BaseMessages.getString( + PKG, "ResourceUtil.Exception.ErrorClosingZipStream", zipFilename)); + } + } } - } - } - } - - public static String getExplanation( String zipFilename, String launchFile, - ResourceExportInterface resourceExportInterface ) { - - String commandString = ""; - if ( Const.isWindows() ) { - if ( resourceExportInterface instanceof TransMeta ) { - commandString += "Pan.bat /file:\""; - } else { - commandString += "Kitchen.bat /file:\""; - } - } else { - if ( resourceExportInterface instanceof TransMeta ) { - commandString += "sh pan.sh -file='"; - } else { - commandString += "sh kitchen.sh -file='"; - } - } - commandString += launchFile; - if ( Const.isWindows() ) { - commandString += "\""; - } else { - commandString += "'"; } - String message = - BaseMessages.getString( - PKG, "ResourceUtil.ExportResourcesExplanation", zipFilename, commandString, launchFile, Const.CR ); - return message; - } + public static String getExplanation(String zipFilename, String launchFile, + ResourceExportInterface resourceExportInterface) { + + String commandString = ""; + if (Const.isWindows()) { + if (resourceExportInterface instanceof TransMeta) { + commandString += "Pan.bat /file:\""; + } else { + commandString += "Kitchen.bat /file:\""; + } + } else { + if (resourceExportInterface instanceof TransMeta) { + commandString += "sh pan.sh -file='"; + } else { + commandString += "sh kitchen.sh -file='"; + } + } + commandString += launchFile; + if (Const.isWindows()) { + commandString += "\""; + } else { + commandString += "'"; + } + + String message = + BaseMessages.getString( + PKG, "ResourceUtil.ExportResourcesExplanation", zipFilename, commandString, launchFile, Const.CR); + return message; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java index 916b583..06ce49a 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java @@ -23,82 +23,19 @@ package org.pentaho.di.trans; -import java.io.OutputStreamWriter; -import java.io.PrintWriter; -import java.io.UnsupportedEncodingException; -import java.net.URLEncoder; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Date; -import java.util.Deque; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Timer; -import java.util.TimerTask; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import javax.servlet.http.HttpServletRequest; -import 
javax.servlet.http.HttpServletResponse; - import org.apache.commons.lang.StringUtils; import org.apache.commons.vfs2.FileName; import org.apache.commons.vfs2.FileObject; import org.pentaho.di.cluster.SlaveServer; -import org.pentaho.di.core.BlockingBatchingRowSet; -import org.pentaho.di.core.BlockingRowSet; -import org.pentaho.di.core.Const; -import org.pentaho.di.core.Counter; -import org.pentaho.di.core.ExecutorInterface; -import org.pentaho.di.core.ExtensionDataInterface; -import org.pentaho.di.core.KettleEnvironment; -import org.pentaho.di.core.QueueRowSet; -import org.pentaho.di.core.Result; -import org.pentaho.di.core.ResultFile; -import org.pentaho.di.core.RowMetaAndData; -import org.pentaho.di.core.RowSet; -import org.pentaho.di.core.SingleRowRowSet; +import org.pentaho.di.core.*; import org.pentaho.di.core.database.Database; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.database.DatabaseTransactionListener; import org.pentaho.di.core.database.map.DatabaseConnectionMap; -import org.pentaho.di.core.exception.KettleDatabaseException; -import org.pentaho.di.core.exception.KettleException; -import org.pentaho.di.core.exception.KettleFileException; -import org.pentaho.di.core.exception.KettleTransException; -import org.pentaho.di.core.exception.KettleValueException; +import org.pentaho.di.core.exception.*; import org.pentaho.di.core.extension.ExtensionPointHandler; import org.pentaho.di.core.extension.KettleExtensionPoint; -import org.pentaho.di.core.logging.ChannelLogTable; -import org.pentaho.di.core.logging.HasLogChannelInterface; -import org.pentaho.di.core.logging.KettleLogStore; -import org.pentaho.di.core.logging.LogChannel; -import org.pentaho.di.core.logging.LogChannelInterface; -import org.pentaho.di.core.logging.LogLevel; -import org.pentaho.di.core.logging.LogStatus; -import org.pentaho.di.core.logging.LoggingHierarchy; -import org.pentaho.di.core.logging.LoggingMetric; -import org.pentaho.di.core.logging.LoggingObjectInterface; -import org.pentaho.di.core.logging.LoggingObjectType; -import org.pentaho.di.core.logging.LoggingRegistry; -import org.pentaho.di.core.logging.Metrics; -import org.pentaho.di.core.logging.MetricsLogTable; -import org.pentaho.di.core.logging.MetricsRegistry; -import org.pentaho.di.core.logging.PerformanceLogTable; -import org.pentaho.di.core.logging.StepLogTable; -import org.pentaho.di.core.logging.TransLogTable; +import org.pentaho.di.core.logging.*; import org.pentaho.di.core.metrics.MetricsDuration; import org.pentaho.di.core.metrics.MetricsSnapshotInterface; import org.pentaho.di.core.metrics.MetricsUtil; @@ -125,28 +62,25 @@ import org.pentaho.di.resource.TopLevelResource; import org.pentaho.di.trans.cluster.TransSplitter; import org.pentaho.di.trans.performance.StepPerformanceSnapShot; -import org.pentaho.di.trans.step.BaseStep; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.BaseStepData.StepExecutionStatus; -import org.pentaho.di.trans.step.RunThread; -import org.pentaho.di.trans.step.StepAdapter; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepInitThread; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepListener; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaDataCombi; -import org.pentaho.di.trans.step.StepPartitioningMeta; import org.pentaho.di.trans.steps.mappinginput.MappingInput; import org.pentaho.di.trans.steps.mappingoutput.MappingOutput; -import 
org.pentaho.di.www.AddExportServlet; -import org.pentaho.di.www.PrepareExecutionTransServlet; -import org.pentaho.di.www.RegisterTransServlet; -import org.pentaho.di.www.SlaveServerTransStatus; -import org.pentaho.di.www.SocketRepository; -import org.pentaho.di.www.StartExecutionTransServlet; -import org.pentaho.di.www.WebResult; +import org.pentaho.di.www.*; import org.pentaho.metastore.api.IMetaStore; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.text.SimpleDateFormat; +import java.util.*; +import java.util.concurrent.*; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + /** * This class represents the information and operations associated with the concept of a Transformation. It loads, * instantiates, initializes, runs, and monitors the execution of the transformation contained in the specified @@ -154,5427 +88,5405 @@ * * @author Matt * @since 07-04-2003 - * */ public class Trans implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface, - ExecutorInterface, ExtensionDataInterface { + ExecutorInterface, ExtensionDataInterface { + + /** + * The package name, used for internationalization of messages. + */ + private static Class PKG = Trans.class; // for i18n purposes, needed by Translator2!! + + /** + * The replay date format. + */ + public static final String REPLAY_DATE_FORMAT = "yyyy/MM/dd HH:mm:ss"; + + /** + * The log channel interface. + */ + protected LogChannelInterface log; + + /** + * The log level. + */ + protected LogLevel logLevel = LogLevel.BASIC; + + /** + * The container object id. + */ + protected String containerObjectId; + + /** + * The log commit size. + */ + protected int logCommitSize = 10; + + /** + * The transformation metadata to execute. + */ + protected TransMeta transMeta; + + /** + * The repository we are referencing. + */ + protected Repository repository; + + /** + * The MetaStore to use + */ + protected IMetaStore metaStore; + + /** + * The job that's launching this transformation. This gives us access to the whole chain, including the parent + * variables, etc. + */ + private Job parentJob; + + /** + * The transformation that is executing this transformation in case of mappings. + */ + private Trans parentTrans; + + /** + * The parent logging object interface (this could be a transformation or a job). + */ + private LoggingObjectInterface parent; + + /** + * The name of the mapping step that executes this transformation in case this is a mapping. + */ + private String mappingStepName; + + /** + * Indicates that we want to monitor the running transformation in a GUI. + */ + private boolean monitored; + + /** + * Indicates that we are running in preview mode... + */ + private boolean preview; + + /** + * The date objects for logging information about the transformation such as start and end time, etc. + */ + private Date startDate, endDate, currentDate, logDate, depDate; + + /** + * The job start and end date. + */ + private Date jobStartDate, jobEndDate; + + /** + * The batch id. + */ + private long batchId; + + /** + * This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the + * transformation's batch id. + */ + private long passedBatchId; + + /** + * The variable bindings for the transformation. 
+ */ + private VariableSpace variables = new Variables(); + + /** + * A list of all the row sets. + */ + private List rowsets; + + /** + * A list of all the steps. + */ + private List steps; + + /** + * The class number. + */ + public int class_nr; + + /** + * The replayDate indicates that this transformation is a replay transformation for a transformation executed on + * replayDate. If replayDate is null, the transformation is not a replay. + */ + private Date replayDate; + + /** + * Constant indicating a dispatch type of 1-to-1. + */ + public static final int TYPE_DISP_1_1 = 1; + + /** + * Constant indicating a dispatch type of 1-to-N. + */ + public static final int TYPE_DISP_1_N = 2; + + /** + * Constant indicating a dispatch type of N-to-1. + */ + public static final int TYPE_DISP_N_1 = 3; + + /** + * Constant indicating a dispatch type of N-to-N. + */ + public static final int TYPE_DISP_N_N = 4; + + /** + * Constant indicating a dispatch type of N-to-M. + */ + public static final int TYPE_DISP_N_M = 5; + + /** + * Constant indicating a transformation status of Finished. + */ + public static final String STRING_FINISHED = "Finished"; + + /** + * Constant indicating a transformation status of Finished (with errors). + */ + public static final String STRING_FINISHED_WITH_ERRORS = "Finished (with errors)"; + + /** + * Constant indicating a transformation status of Running. + */ + public static final String STRING_RUNNING = "Running"; + + /** + * Constant indicating a transformation status of Paused. + */ + public static final String STRING_PAUSED = "Paused"; + + /** + * Constant indicating a transformation status of Preparing for execution. + */ + public static final String STRING_PREPARING = "Preparing executing"; + + /** + * Constant indicating a transformation status of Initializing. + */ + public static final String STRING_INITIALIZING = "Initializing"; + + /** + * Constant indicating a transformation status of Waiting. + */ + public static final String STRING_WAITING = "Waiting"; + + /** + * Constant indicating a transformation status of Stopped. + */ + public static final String STRING_STOPPED = "Stopped"; + + /** + * Constant indicating a transformation status of Halting. + */ + public static final String STRING_HALTING = "Halting"; + + /** + * Constant specifying a filename containing XML to inject into a ZIP file created during resource export. + */ + public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml"; + + /** + * Whether safe mode is enabled. + */ + private boolean safeModeEnabled; + + /** + * The thread name. + */ + @Deprecated + private String threadName; + + /** + * The transaction ID + */ + private String transactionId; + + /** + * Whether the transformation is preparing for execution. + */ + private volatile boolean preparing; + + /** + * Whether the transformation is initializing. + */ + private boolean initializing; + + /** + * Whether the transformation is running. + */ + private boolean running; + + /** + * Whether the transformation is finished. + */ + private final AtomicBoolean finished; + + /** + * Whether the transformation is paused. + */ + private AtomicBoolean paused; + + /** + * Whether the transformation is stopped. + */ + private AtomicBoolean stopped; + + /** + * The number of errors that have occurred during execution of the transformation. + */ + private AtomicInteger errors; + + /** + * Whether the transformation is ready to start. + */ + private boolean readyToStart; + + /** + * Step performance snapshots. 
+ */ + private Map> stepPerformanceSnapShots; + + /** + * The step performance snapshot timer. + */ + private Timer stepPerformanceSnapShotTimer; + + /** + * A list of listeners attached to the transformation. + */ + private List transListeners; + + /** + * A list of stop-event listeners attached to the transformation. + */ + private List transStoppedListeners; + + /** + * In case this transformation starts to delegate work to a local transformation or job + */ + private List delegationListeners; + + /** + * The number of finished steps. + */ + private int nrOfFinishedSteps; + + /** + * The number of active steps. + */ + private int nrOfActiveSteps; + + /** + * The named parameters. + */ + private NamedParams namedParams = new NamedParamsDefault(); + + /** + * The socket repository. + */ + private SocketRepository socketRepository; + + /** + * The transformation log table database connection. + */ + private Database transLogTableDatabaseConnection; + + /** + * The step performance snapshot sequence number. + */ + private AtomicInteger stepPerformanceSnapshotSeqNr; + + /** + * The last written step performance sequence number. + */ + private int lastWrittenStepPerformanceSequenceNr; + + /** + * The last step performance snapshot sequence number added. + */ + private int lastStepPerformanceSnapshotSeqNrAdded; + + /** + * The active subtransformations. + */ + private Map activeSubtransformations; + + /** + * The active subjobs + */ + private Map activeSubjobs; + + /** + * The step performance snapshot size limit. + */ + private int stepPerformanceSnapshotSizeLimit; + + /** + * The servlet print writer. + */ + private PrintWriter servletPrintWriter; + + /** + * The trans finished blocking queue. + */ + private ArrayBlockingQueue transFinishedBlockingQueue; + + /** + * The name of the executing server + */ + private String executingServer; + + /** + * The name of the executing user + */ + private String executingUser; + + private Result previousResult; + + protected List resultRows; + + protected List resultFiles; + + /** + * The command line arguments for the transformation. + */ + protected String[] arguments; + + /** + * A table of named counters. + */ + protected Hashtable counters; + + private HttpServletResponse servletResponse; + + private HttpServletRequest servletRequest; + + private Map extensionDataMap; + + private ExecutorService heartbeat = null; // this transformations's heartbeat scheduled executor + + /** + * Instantiates a new transformation. + */ + public Trans() { + finished = new AtomicBoolean(false); + paused = new AtomicBoolean(false); + stopped = new AtomicBoolean(false); + + transListeners = Collections.synchronizedList(new ArrayList()); + transStoppedListeners = Collections.synchronizedList(new ArrayList()); + delegationListeners = new ArrayList(); + + // Get a valid transactionId in case we run database transactional. + transactionId = calculateTransactionId(); + threadName = transactionId; // / backward compatibility but deprecated! + + errors = new AtomicInteger(0); + + stepPerformanceSnapshotSeqNr = new AtomicInteger(0); + lastWrittenStepPerformanceSequenceNr = 0; + + activeSubtransformations = new HashMap(); + activeSubjobs = new HashMap(); + + resultRows = new ArrayList(); + resultFiles = new ArrayList(); + counters = new Hashtable(); + + extensionDataMap = new HashMap(); + } - /** The package name, used for internationalization of messages. */ - private static Class PKG = Trans.class; // for i18n purposes, needed by Translator2!! 
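// Illustrative only -- a minimal usage sketch for the Trans API reformatted in this
// hunk, assuming KettleEnvironment is on the classpath and "/tmp/example.ktr" is a
// hypothetical path to an exported transformation (checked-exception handling omitted):
//
//     import org.pentaho.di.core.KettleEnvironment;
//     import org.pentaho.di.trans.Trans;
//     import org.pentaho.di.trans.TransMeta;
//
//     KettleEnvironment.init();                         // bootstrap plugins and logging
//     TransMeta meta = new TransMeta("/tmp/example.ktr");
//     Trans trans = new Trans(meta);                    // Trans(TransMeta) constructor shown below
//     trans.execute(null);                              // prepare execution and start all step threads
//     trans.waitUntilFinished();                        // block until every step has completed
//     if (trans.getErrors() > 0) {
//         throw new RuntimeException("Transformation finished with errors");
//     }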
+ /** + * Initializes a transformation from transformation meta-data defined in memory. + * + * @param transMeta the transformation meta-data to use. + */ + public Trans(TransMeta transMeta) { + this(transMeta, null); + } - /** The replay date format. */ - public static final String REPLAY_DATE_FORMAT = "yyyy/MM/dd HH:mm:ss"; + /** + * Initializes a transformation from transformation meta-data defined in memory. Also take into account the parent log + * channel interface (job or transformation) for logging lineage purposes. + * + * @param transMeta the transformation meta-data to use. + * @param parent the parent job that is executing this transformation + */ + public Trans(TransMeta transMeta, LoggingObjectInterface parent) { + this(); + this.transMeta = transMeta; + setParent(parent); - /** The log channel interface. */ - protected LogChannelInterface log; + initializeVariablesFrom(transMeta); + copyParametersFrom(transMeta); + transMeta.activateParameters(); - /** The log level. */ - protected LogLevel logLevel = LogLevel.BASIC; + // Get a valid transactionId in case we run database transactional. + transactionId = calculateTransactionId(); + threadName = transactionId; // / backward compatibility but deprecated! + } - /** The container object id. */ - protected String containerObjectId; + /** + * Sets the parent logging object. + * + * @param parent the new parent + */ + public void setParent(LoggingObjectInterface parent) { + this.parent = parent; - /** The log commit size. */ - protected int logCommitSize = 10; + this.log = new LogChannel(this, parent); + this.logLevel = log.getLogLevel(); + this.containerObjectId = log.getContainerObjectId(); - /** The transformation metadata to execute. */ - protected TransMeta transMeta; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationIsPreloaded")); + } + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "Trans.Log.NumberOfStepsToRun", String.valueOf(transMeta.nrSteps()), + String.valueOf(transMeta.nrTransHops()))); + } - /** - * The repository we are referencing. - */ - protected Repository repository; + } - /** - * The MetaStore to use - */ - protected IMetaStore metaStore; + /** + * Sets the default log commit size. + */ + private void setDefaultLogCommitSize() { + String propLogCommitSize = this.getVariable("pentaho.log.commit.size"); + if (propLogCommitSize != null) { + // override the logCommit variable + try { + logCommitSize = Integer.parseInt(propLogCommitSize); + } catch (Exception ignored) { + logCommitSize = 10; // ignore parsing error and default to 10 + } + } - /** - * The job that's launching this transformation. This gives us access to the whole chain, including the parent - * variables, etc. - */ - private Job parentJob; + } - /** - * The transformation that is executing this transformation in case of mappings. - */ - private Trans parentTrans; + /** + * Gets the log channel interface for the transformation. + * + * @return the log channel + * @see org.pentaho.di.core.logging.HasLogChannelInterface#getLogChannel() + */ + public LogChannelInterface getLogChannel() { + return log; + } - /** The parent logging object interface (this could be a transformation or a job). */ - private LoggingObjectInterface parent; + /** + * Sets the log channel interface for the transformation. 
+ * + * @param log the new log channel interface + */ + public void setLog(LogChannelInterface log) { + this.log = log; + } - /** The name of the mapping step that executes this transformation in case this is a mapping. */ - private String mappingStepName; + /** + * Gets the name of the transformation. + * + * @return the transformation name + */ + public String getName() { + if (transMeta == null) { + return null; + } - /** Indicates that we want to monitor the running transformation in a GUI. */ - private boolean monitored; + return transMeta.getName(); + } + + /** + * Instantiates a new transformation using any of the provided parameters including the variable bindings, a + * repository, a name, a repository directory name, and a filename. This is a multi-purpose method that supports + * loading a transformation from a file (if the filename is provided but not a repository object) or from a repository + * (if the repository object, repository directory name, and transformation name are specified). + * + * @param parent the parent variable space and named params + * @param rep the repository + * @param name the name of the transformation + * @param dirname the dirname the repository directory name + * @param filename the filename containing the transformation definition + * @throws KettleException if any error occurs during loading, parsing, or creation of the transformation + */ + public Trans(Parent parent, Repository rep, String name, String dirname, + String filename) throws KettleException { + this(); + try { + if (rep != null) { + RepositoryDirectoryInterface repdir = rep.findDirectory(dirname); + if (repdir != null) { + this.transMeta = rep.loadTransformation(name, repdir, null, false, null); // reads last version + } else { + throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToLoadTransformation", name, + dirname)); + } + } else { + transMeta = new TransMeta(filename, false); + } - /** - * Indicates that we are running in preview mode... - */ - private boolean preview; + this.log = LogChannel.GENERAL; - /** The date objects for logging information about the transformation such as start and end time, etc. */ - private Date startDate, endDate, currentDate, logDate, depDate; + transMeta.initializeVariablesFrom(parent); + initializeVariablesFrom(parent); + // PDI-3064 do not erase parameters from meta! + // instead of this - copy parameters to actual transformation + this.copyParametersFrom(parent); + this.activateParameters(); - /** The job start and end date. */ - private Date jobStartDate, jobEndDate; + this.setDefaultLogCommitSize(); - /** The batch id. */ - private long batchId; + // Get a valid transactionId in case we run database transactional. + transactionId = calculateTransactionId(); + threadName = transactionId; // / backward compatibility but deprecated! + } catch (KettleException e) { + throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.UnableToOpenTransformation", name), e); + } + } - /** - * This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the - * transformation's batch id. - */ - private long passedBatchId; + /** + * Executes the transformation. This method will prepare the transformation for execution and then start all the + * threads associated with the transformation and its steps. 
+ * + * @param arguments the arguments + * @throws KettleException if the transformation could not be prepared (initialized) + */ + public void execute(String[] arguments) throws KettleException { + prepareExecution(arguments); + startThreads(); + } - /** The variable bindings for the transformation. */ - private VariableSpace variables = new Variables(); + /** + * Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing + * and tracking the steps and hops in the transformation. + * + * @param arguments the arguments to use for this transformation + * @throws KettleException in case the transformation could not be prepared (initialized) + */ + public void prepareExecution(String[] arguments) throws KettleException { + preparing = true; + startDate = null; + running = false; - /** A list of all the row sets. */ - private List rowsets; + log.snap(Metrics.METRIC_TRANSFORMATION_EXECUTION_START); + log.snap(Metrics.METRIC_TRANSFORMATION_INIT_START); - /** A list of all the steps. */ - private List steps; + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationPrepareExecution.id, this); - /** The class number. */ - public int class_nr; + checkCompatibility(); - /** - * The replayDate indicates that this transformation is a replay transformation for a transformation executed on - * replayDate. If replayDate is null, the transformation is not a replay. - */ - private Date replayDate; + // Set the arguments on the transformation... + // + if (arguments != null) { + setArguments(arguments); + } - /** Constant indicating a dispatch type of 1-to-1. */ - public static final int TYPE_DISP_1_1 = 1; + activateParameters(); + transMeta.activateParameters(); - /** Constant indicating a dispatch type of 1-to-N. */ - public static final int TYPE_DISP_1_N = 2; + if (transMeta.getName() == null) { + if (transMeta.getFilename() != null) { + log.logBasic(BaseMessages.getString(PKG, "Trans.Log.DispacthingStartedForFilename", transMeta + .getFilename())); + } + } else { + log.logBasic(BaseMessages.getString(PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta + .getName())); + } - /** Constant indicating a dispatch type of N-to-1. */ - public static final int TYPE_DISP_N_1 = 3; + if (getArguments() != null) { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.NumberOfArgumentsDetected", String.valueOf( + getArguments().length))); + } + } - /** Constant indicating a dispatch type of N-to-N. */ - public static final int TYPE_DISP_N_N = 4; + if (isSafeModeEnabled()) { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName())); + } + } - /** Constant indicating a dispatch type of N-to-M. */ - public static final int TYPE_DISP_N_M = 5; + if (getReplayDate() != null) { + SimpleDateFormat df = new SimpleDateFormat(REPLAY_DATE_FORMAT); + log.logBasic(BaseMessages.getString(PKG, "Trans.Log.ThisIsAReplayTransformation") + df.format( + getReplayDate())); + } else { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.ThisIsNotAReplayTransformation")); + } + } - /** Constant indicating a transformation status of Finished. */ - public static final String STRING_FINISHED = "Finished"; + // setInternalKettleVariables(this); --> Let's not do this, when running + // without file, for example remote, it spoils the fun - /** Constant indicating a transformation status of Finished (with errors). 
*/ - public static final String STRING_FINISHED_WITH_ERRORS = "Finished (with errors)"; + // extra check to see if the servlet print writer has some value in case + // folks want to test it locally... + // + if (servletPrintWriter == null) { + String encoding = System.getProperty("KETTLE_DEFAULT_SERVLET_ENCODING", null); + if (encoding == null) { + servletPrintWriter = new PrintWriter(new OutputStreamWriter(System.out)); + } else { + try { + servletPrintWriter = new PrintWriter(new OutputStreamWriter(System.out, encoding)); + } catch (UnsupportedEncodingException ex) { + servletPrintWriter = new PrintWriter(new OutputStreamWriter(System.out)); + } + } + } - /** Constant indicating a transformation status of Running. */ - public static final String STRING_RUNNING = "Running"; + // Keep track of all the row sets and allocated steps + // + steps = new ArrayList(); + rowsets = new ArrayList(); - /** Constant indicating a transformation status of Paused. */ - public static final String STRING_PAUSED = "Paused"; + List hopsteps = transMeta.getTransHopSteps(false); - /** Constant indicating a transformation status of Preparing for execution. */ - public static final String STRING_PREPARING = "Preparing executing"; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.FoundDefferentSteps", String.valueOf(hopsteps + .size()))); + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocatingRowsets")); + } + // First allocate all the rowsets required! + // Note that a mapping doesn't receive ANY input or output rowsets... + // + for (int i = 0; i < hopsteps.size(); i++) { + StepMeta thisStep = hopsteps.get(i); + if (thisStep.isMapping()) { + continue; // handled and allocated by the mapping step itself. + } - /** Constant indicating a transformation status of Initializing. */ - public static final String STRING_INITIALIZING = "Initializing"; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf(i), + thisStep.getName())); + } - /** Constant indicating a transformation status of Waiting. */ - public static final String STRING_WAITING = "Waiting"; + List nextSteps = transMeta.findNextSteps(thisStep); + int nrTargets = nextSteps.size(); - /** Constant indicating a transformation status of Stopped. */ - public static final String STRING_STOPPED = "Stopped"; + for (int n = 0; n < nrTargets; n++) { + // What's the next step? + StepMeta nextStep = nextSteps.get(n); + if (nextStep.isMapping()) { + continue; // handled and allocated by the mapping step itself. + } - /** Constant indicating a transformation status of Halting. */ - public static final String STRING_HALTING = "Halting"; + // How many times do we start the source step? + int thisCopies = thisStep.getCopies(); - /** Constant specifying a filename containing XML to inject into a ZIP file created during resource export. */ - public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml"; + if (thisCopies < 0) { + // This can only happen if a variable is used that didn't resolve to a positive integer value + // + throw new KettleException(BaseMessages.getString(PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep + .getName())); + } - /** Whether safe mode is enabled. */ - private boolean safeModeEnabled; + // How many times do we start the target step? + int nextCopies = nextStep.getCopies(); - /** The thread name. */ - @Deprecated - private String threadName; + // Are we re-partitioning? 
+ boolean repartitioning; + if (thisStep.isPartitioned()) { + repartitioning = !thisStep.getStepPartitioningMeta().equals(nextStep.getStepPartitioningMeta()); + } else { + repartitioning = nextStep.isPartitioned(); + } - /** The transaction ID */ - private String transactionId; + int nrCopies; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.copiesInfo", String.valueOf(thisCopies), String + .valueOf(nextCopies))); + } + int dispatchType; + if (thisCopies == 1 && nextCopies == 1) { + dispatchType = TYPE_DISP_1_1; + nrCopies = 1; + } else if (thisCopies == 1 && nextCopies > 1) { + dispatchType = TYPE_DISP_1_N; + nrCopies = nextCopies; + } else if (thisCopies > 1 && nextCopies == 1) { + dispatchType = TYPE_DISP_N_1; + nrCopies = thisCopies; + } else if (thisCopies == nextCopies && !repartitioning) { + dispatchType = TYPE_DISP_N_N; + nrCopies = nextCopies; + } else { + // > 1! + dispatchType = TYPE_DISP_N_M; + nrCopies = nextCopies; + } // Allocate a rowset for each destination step - /** Whether the transformation is preparing for execution. */ - private volatile boolean preparing; + // Allocate the rowsets + // + if (dispatchType != TYPE_DISP_N_M) { + for (int c = 0; c < nrCopies; c++) { + RowSet rowSet; + switch (transMeta.getTransformationType()) { + case Normal: + // This is a temporary patch until the batching rowset has proven + // to be working in all situations. + // Currently there are stalling problems when dealing with small + // amounts of rows. + // + Boolean batchingRowSet = + ValueMeta.convertStringToBoolean(System.getProperty(Const.KETTLE_BATCHING_ROWSET)); + if (batchingRowSet != null && batchingRowSet.booleanValue()) { + rowSet = new BlockingBatchingRowSet(transMeta.getSizeRowset()); + } else { + rowSet = new BlockingRowSet(transMeta.getSizeRowset()); + } + break; + + case SerialSingleThreaded: + rowSet = new SingleRowRowSet(); + break; + + case SingleThreaded: + rowSet = new QueueRowSet(); + break; + + default: + throw new KettleException("Unhandled transformation type: " + transMeta.getTransformationType()); + } + + switch (dispatchType) { + case TYPE_DISP_1_1: + rowSet.setThreadNameFromToCopy(thisStep.getName(), 0, nextStep.getName(), 0); + break; + case TYPE_DISP_1_N: + rowSet.setThreadNameFromToCopy(thisStep.getName(), 0, nextStep.getName(), c); + break; + case TYPE_DISP_N_1: + rowSet.setThreadNameFromToCopy(thisStep.getName(), c, nextStep.getName(), 0); + break; + case TYPE_DISP_N_N: + rowSet.setThreadNameFromToCopy(thisStep.getName(), c, nextStep.getName(), c); + break; + default: + break; + } + rowsets.add(rowSet); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.TransformationAllocatedNewRowset", rowSet + .toString())); + } + } + } else { + // For each N source steps we have M target steps + // + // From each input step we go to all output steps. + // This allows maximum flexibility for re-partitioning, + // distribution... 
+ for (int s = 0; s < thisCopies; s++) { + for (int t = 0; t < nextCopies; t++) { + BlockingRowSet rowSet = new BlockingRowSet(transMeta.getSizeRowset()); + rowSet.setThreadNameFromToCopy(thisStep.getName(), s, nextStep.getName(), t); + rowsets.add(rowSet); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.TransformationAllocatedNewRowset", rowSet + .toString())); + } + } + } + } + } + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocatedRowsets", String.valueOf(rowsets.size()), + String.valueOf(i), thisStep.getName()) + " "); + } - /** Whether the transformation is initializing. */ - private boolean initializing; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.AllocatingStepsAndStepData")); + } - /** Whether the transformation is running. */ - private boolean running; + // Allocate the steps & the data... + // + for (int i = 0; i < hopsteps.size(); i++) { + StepMeta stepMeta = hopsteps.get(i); + String stepid = stepMeta.getStepID(); - /** Whether the transformation is finished. */ - private final AtomicBoolean finished; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), + stepid)); + } - /** Whether the transformation is paused. */ - private AtomicBoolean paused; + // How many copies are launched of this step? + int nrCopies = stepMeta.getCopies(); - /** Whether the transformation is stopped. */ - private AtomicBoolean stopped; + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf(nrCopies))); + } - /** The number of errors that have occurred during execution of the transformation. */ - private AtomicInteger errors; + // At least run once... + for (int c = 0; c < nrCopies; c++) { + // Make sure we haven't started it yet! + if (!hasStepStarted(stepMeta.getName(), c)) { + StepMetaDataCombi combi = new StepMetaDataCombi(); + + combi.stepname = stepMeta.getName(); + combi.copy = c; + + // The meta-data + combi.stepMeta = stepMeta; + combi.meta = stepMeta.getStepMetaInterface(); + + // Allocate the step data + StepDataInterface data = combi.meta.getStepData(); + combi.data = data; + + // Allocate the step + StepInterface step = combi.meta.getStep(stepMeta, data, c, transMeta, this); + + // Copy the variables of the transformation to the step... + // don't share. Each copy of the step has its own variables. + // + step.initializeVariablesFrom(this); + step.setUsingThreadPriorityManagment(transMeta.isUsingThreadPriorityManagment()); + + // Pass the connected repository & metaStore to the steps runtime + // + step.setRepository(repository); + step.setMetaStore(metaStore); + + // If the step is partitioned, set the partitioning ID and some other + // things as well... + if (stepMeta.isPartitioned()) { + List partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs(); + if (partitionIDs != null && partitionIDs.size() > 0) { + step.setPartitionID(partitionIDs.get(c)); // Pass the partition ID + // to the step + } + } - /** Whether the transformation is ready to start. */ - private boolean readyToStart; + // Save the step too + combi.step = step; - /** Step performance snapshots. */ - private Map> stepPerformanceSnapShots; + // Pass logging level and metrics gathering down to the step level. 
+ // / + if (combi.step instanceof LoggingObjectInterface) { + LogChannelInterface logChannel = combi.step.getLogChannel(); + logChannel.setLogLevel(logLevel); + logChannel.setGatheringMetrics(log.isGatheringMetrics()); + } - /** The step performance snapshot timer. */ - private Timer stepPerformanceSnapShotTimer; + // Add to the bunch... + steps.add(combi); - /** A list of listeners attached to the transformation. */ - private List transListeners; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta + .getName(), String.valueOf(c))); + } + } + } + } - /** A list of stop-event listeners attached to the transformation. */ - private List transStoppedListeners; + // Now we need to verify if certain rowsets are not meant to be for error + // handling... + // Loop over the steps and for every step verify the output rowsets + // If a rowset is going to a target step in the steps error handling + // metadata, set it to the errorRowSet. + // The input rowsets are already in place, so the next step just accepts the + // rows. + // Metadata wise we need to do the same trick in TransMeta + // + for (int s = 0; s < steps.size(); s++) { + StepMetaDataCombi combi = steps.get(s); + if (combi.stepMeta.isDoingErrorHandling()) { + combi.step.identifyErrorOutput(); - /** In case this transformation starts to delegate work to a local transformation or job */ - private List delegationListeners; + } + } - /** The number of finished steps. */ - private int nrOfFinishedSteps; + // Now (optionally) write start log record! + // Make sure we synchronize appropriately to avoid duplicate batch IDs. + // + Object syncObject = this; + if (parentJob != null) { + syncObject = parentJob; // parallel execution in a job + } + if (parentTrans != null) { + syncObject = parentTrans; // multiple sub-transformations + } + synchronized (syncObject) { + calculateBatchIdAndDateRange(); + beginProcessing(); + } - /** The number of active steps. */ - private int nrOfActiveSteps; + // Set the partition-to-rowset mapping + // + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); - /** The named parameters. */ - private NamedParams namedParams = new NamedParamsDefault(); + StepMeta stepMeta = sid.stepMeta; + StepInterface baseStep = sid.step; - /** The socket repository. */ - private SocketRepository socketRepository; + baseStep.setPartitioned(stepMeta.isPartitioned()); - /** The transformation log table database connection. */ - private Database transLogTableDatabaseConnection; + // Now let's take a look at the source and target relation + // + // If this source step is not partitioned, and the target step is: it + // means we need to re-partition the incoming data. + // If both steps are partitioned on the same method and schema, we don't + // need to re-partition + // If both steps are partitioned on a different method or schema, we need + // to re-partition as well. + // If both steps are not partitioned, we don't need to re-partition + // + boolean isThisPartitioned = stepMeta.isPartitioned(); + PartitionSchema thisPartitionSchema = null; + if (isThisPartitioned) { + thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema(); + } - /** The step performance snapshot sequence number. 
*/ - private AtomicInteger stepPerformanceSnapshotSeqNr; + boolean isNextPartitioned = false; + StepPartitioningMeta nextStepPartitioningMeta = null; + PartitionSchema nextPartitionSchema = null; + + List nextSteps = transMeta.findNextSteps(stepMeta); + int nrNext = nextSteps.size(); + for (int p = 0; p < nrNext; p++) { + StepMeta nextStep = nextSteps.get(p); + if (nextStep.isPartitioned()) { + isNextPartitioned = true; + nextStepPartitioningMeta = nextStep.getStepPartitioningMeta(); + nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema(); + } + } - /** The last written step performance sequence number. */ - private int lastWrittenStepPerformanceSequenceNr; + baseStep.setRepartitioning(StepPartitioningMeta.PARTITIONING_METHOD_NONE); - /** The last step performance snapshot sequence number added. */ - private int lastStepPerformanceSnapshotSeqNrAdded; + // If the next step is partitioned differently, set re-partitioning, when + // running locally. + // + if ((!isThisPartitioned && isNextPartitioned) || (isThisPartitioned && isNextPartitioned + && !thisPartitionSchema.equals(nextPartitionSchema))) { + baseStep.setRepartitioning(nextStepPartitioningMeta.getMethodType()); + } - /** The active subtransformations. */ - private Map activeSubtransformations; + // For partitioning to a set of remove steps (repartitioning from a master + // to a set or remote output steps) + // + StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta(); + if (targetStepPartitioningMeta != null) { + baseStep.setRepartitioning(targetStepPartitioningMeta.getMethodType()); + } + } - /** The active subjobs */ - private Map activeSubjobs; + preparing = false; + initializing = true; - /** The step performance snapshot size limit. */ - private int stepPerformanceSnapshotSizeLimit; + // Do a topology sort... Over 150 step (copies) things might be slowing down too much. + // + if (isMonitored() && steps.size() < 150) { + doTopologySortOfSteps(); + } - /** The servlet print writer. */ - private PrintWriter servletPrintWriter; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.InitialisingSteps", String.valueOf(steps.size()))); + } - /** The trans finished blocking queue. */ - private ArrayBlockingQueue transFinishedBlockingQueue; + StepInitThread[] initThreads = new StepInitThread[steps.size()]; + Thread[] threads = new Thread[steps.size()]; - /** The name of the executing server */ - private String executingServer; + // Initialize all the threads... + // + for (int i = 0; i < steps.size(); i++) { + final StepMetaDataCombi sid = steps.get(i); - /** The name of the executing user */ - private String executingUser; + // Do the init code in the background! + // Init all steps at once, but ALL steps need to finish before we can + // continue properly! + // + initThreads[i] = new StepInitThread(sid, log); - private Result previousResult; + // Put it in a separate thread! + // + threads[i] = new Thread(initThreads[i]); + threads[i].setName("init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")"); - protected List resultRows; + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i]); - protected List resultFiles; + threads[i].start(); + } - /** The command line arguments for the transformation. 
*/ - protected String[] arguments; + for (int i = 0; i < threads.length; i++) { + try { + threads[i].join(); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i]); + } catch (Exception ex) { + log.logError("Error with init thread: " + ex.getMessage(), ex.getMessage()); + log.logError(Const.getStackTracker(ex)); + } + } - /** - * A table of named counters. - */ - protected Hashtable counters; + initializing = false; + boolean ok = true; - private HttpServletResponse servletResponse; + // All step are initialized now: see if there was one that didn't do it + // correctly! + // + for (int i = 0; i < initThreads.length; i++) { + StepMetaDataCombi combi = initThreads[i].getCombi(); + if (!initThreads[i].isOk()) { + log.logError(BaseMessages.getString(PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." + combi.copy)); + combi.data.setStatus(StepExecutionStatus.STATUS_STOPPED); + ok = false; + } else { + combi.data.setStatus(StepExecutionStatus.STATUS_IDLE); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.StepInitialized", combi.stepname + "." + + combi.copy)); + } + } + } - private HttpServletRequest servletRequest; + if (!ok) { + // Halt the other threads as well, signal end-of-the line to the outside + // world... + // Also explicitly call dispose() to clean up resources opened during + // init(); + // + for (int i = 0; i < initThreads.length; i++) { + StepMetaDataCombi combi = initThreads[i].getCombi(); - private Map extensionDataMap; + // Dispose will overwrite the status, but we set it back right after + // this. + combi.step.dispose(combi.meta, combi.data); - private ExecutorService heartbeat = null; // this transformations's heartbeat scheduled executor + if (initThreads[i].isOk()) { + combi.data.setStatus(StepExecutionStatus.STATUS_HALTED); + } else { + combi.data.setStatus(StepExecutionStatus.STATUS_STOPPED); + } + } - /** - * Instantiates a new transformation. - */ - public Trans() { - finished = new AtomicBoolean( false ); - paused = new AtomicBoolean( false ); - stopped = new AtomicBoolean( false ); + // Just for safety, fire the trans finished listeners... + try { + fireTransFinishedListeners(); + } catch (KettleException e) { + // listeners produces errors + log.logError(BaseMessages.getString(PKG, "Trans.FinishListeners.Exception")); + // we will not pass this exception up to prepareExecuton() entry point. + } finally { + // Flag the transformation as finished even if exception was thrown + setFinished(true); + } - transListeners = Collections.synchronizedList( new ArrayList() ); - transStoppedListeners = Collections.synchronizedList( new ArrayList() ); - delegationListeners = new ArrayList(); + // Pass along the log during preview. Otherwise it becomes hard to see + // what went wrong. + // + if (preview) { + String logText = KettleLogStore.getAppender().getBuffer(getLogChannelId(), true).toString(); + throw new KettleException(BaseMessages.getString(PKG, "Trans.Log.FailToInitializeAtLeastOneStep") + Const.CR + + logText); + } else { + throw new KettleException(BaseMessages.getString(PKG, "Trans.Log.FailToInitializeAtLeastOneStep") + + Const.CR); + } + } - // Get a valid transactionId in case we run database transactional. - transactionId = calculateTransactionId(); - threadName = transactionId; // / backward compatibility but deprecated! 
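The join loop above fans each step's init() out to its own thread, waits for all of them, and only then inspects the per-step ok flags; a single failed init stops the whole transformation before any rows flow. A simplified sketch of that fan-out/join pattern, using a hypothetical InitTask type rather than the actual StepInitThread/StepMetaDataCombi API:

    import java.util.ArrayList;
    import java.util.List;

    class InitTask implements Runnable {
        final String name;
        volatile boolean ok;
        InitTask(String name) { this.name = name; }
        public void run() {
            // a real step would open files, database connections, etc. here
            ok = true;
        }
    }

    class ParallelInitSketch {
        // Returns true only if every task initialized successfully.
        static boolean initAll(List<InitTask> tasks) throws InterruptedException {
            List<Thread> threads = new ArrayList<Thread>();
            for (InitTask task : tasks) {
                Thread t = new Thread(task, "init of " + task.name);
                threads.add(t);
                t.start();                    // init all steps at once...
            }
            for (Thread t : threads) {
                t.join();                     // ...but all must finish before execution starts
            }
            boolean ok = true;
            for (InitTask task : tasks) {
                ok &= task.ok;                // one failure marks the run as not ready to start
            }
            return ok;
        }
    }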
+ log.snap(Metrics.METRIC_TRANSFORMATION_INIT_STOP); - errors = new AtomicInteger( 0 ); + KettleEnvironment.setExecutionInformation(this, repository); - stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 ); - lastWrittenStepPerformanceSequenceNr = 0; + readyToStart = true; + } - activeSubtransformations = new HashMap(); - activeSubjobs = new HashMap(); + @SuppressWarnings("deprecation") + private void checkCompatibility() { + // If we don't have a previous result and transMeta does have one, someone has been using a deprecated method. + // + if (transMeta.getPreviousResult() != null && getPreviousResult() == null) { + setPreviousResult(transMeta.getPreviousResult()); + } - resultRows = new ArrayList(); - resultFiles = new ArrayList(); - counters = new Hashtable(); - - extensionDataMap = new HashMap(); - } - - /** - * Initializes a transformation from transformation meta-data defined in memory. - * - * @param transMeta - * the transformation meta-data to use. - */ - public Trans( TransMeta transMeta ) { - this( transMeta, null ); - } - - /** - * Initializes a transformation from transformation meta-data defined in memory. Also take into account the parent log - * channel interface (job or transformation) for logging lineage purposes. - * - * @param transMeta - * the transformation meta-data to use. - * @param parent - * the parent job that is executing this transformation - */ - public Trans( TransMeta transMeta, LoggingObjectInterface parent ) { - this(); - this.transMeta = transMeta; - setParent( parent ); - - initializeVariablesFrom( transMeta ); - copyParametersFrom( transMeta ); - transMeta.activateParameters(); - - // Get a valid transactionId in case we run database transactional. - transactionId = calculateTransactionId(); - threadName = transactionId; // / backward compatibility but deprecated! - } - - /** - * Sets the parent logging object. - * - * @param parent - * the new parent - */ - public void setParent( LoggingObjectInterface parent ) { - this.parent = parent; - - this.log = new LogChannel( this, parent ); - this.logLevel = log.getLogLevel(); - this.containerObjectId = log.getContainerObjectId(); - - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsPreloaded" ) ); - } - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "Trans.Log.NumberOfStepsToRun", String.valueOf( transMeta.nrSteps() ), - String.valueOf( transMeta.nrTransHops() ) ) ); - } - - } - - /** - * Sets the default log commit size. - */ - private void setDefaultLogCommitSize() { - String propLogCommitSize = this.getVariable( "pentaho.log.commit.size" ); - if ( propLogCommitSize != null ) { - // override the logCommit variable - try { - logCommitSize = Integer.parseInt( propLogCommitSize ); - } catch ( Exception ignored ) { - logCommitSize = 10; // ignore parsing error and default to 10 - } - } - - } - - /** - * Gets the log channel interface for the transformation. - * - * @return the log channel - * @see org.pentaho.di.core.logging.HasLogChannelInterface#getLogChannel() - */ - public LogChannelInterface getLogChannel() { - return log; - } - - /** - * Sets the log channel interface for the transformation. - * - * @param log - * the new log channel interface - */ - public void setLog( LogChannelInterface log ) { - this.log = log; - } - - /** - * Gets the name of the transformation. 
- * - * @return the transformation name - */ - public String getName() { - if ( transMeta == null ) { - return null; - } - - return transMeta.getName(); - } - - /** - * Instantiates a new transformation using any of the provided parameters including the variable bindings, a - * repository, a name, a repository directory name, and a filename. This is a multi-purpose method that supports - * loading a transformation from a file (if the filename is provided but not a repository object) or from a repository - * (if the repository object, repository directory name, and transformation name are specified). - * - * @param parent - * the parent variable space and named params - * @param rep - * the repository - * @param name - * the name of the transformation - * @param dirname - * the dirname the repository directory name - * @param filename - * the filename containing the transformation definition - * @throws KettleException - * if any error occurs during loading, parsing, or creation of the transformation - */ - public Trans( Parent parent, Repository rep, String name, String dirname, - String filename ) throws KettleException { - this(); - try { - if ( rep != null ) { - RepositoryDirectoryInterface repdir = rep.findDirectory( dirname ); - if ( repdir != null ) { - this.transMeta = rep.loadTransformation( name, repdir, null, false, null ); // reads last version - } else { - throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.UnableToLoadTransformation", name, - dirname ) ); - } - } else { - transMeta = new TransMeta( filename, false ); - } - - this.log = LogChannel.GENERAL; - - transMeta.initializeVariablesFrom( parent ); - initializeVariablesFrom( parent ); - // PDI-3064 do not erase parameters from meta! - // instead of this - copy parameters to actual transformation - this.copyParametersFrom( parent ); - this.activateParameters(); - - this.setDefaultLogCommitSize(); - - // Get a valid transactionId in case we run database transactional. - transactionId = calculateTransactionId(); - threadName = transactionId; // / backward compatibility but deprecated! - } catch ( KettleException e ) { - throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.UnableToOpenTransformation", name ), e ); - } - } - - /** - * Executes the transformation. This method will prepare the transformation for execution and then start all the - * threads associated with the transformation and its steps. - * - * @param arguments - * the arguments - * @throws KettleException - * if the transformation could not be prepared (initialized) - */ - public void execute( String[] arguments ) throws KettleException { - prepareExecution( arguments ); - startThreads(); - } - - /** - * Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing - * and tracking the steps and hops in the transformation. - * - * @param arguments - * the arguments to use for this transformation - * @throws KettleException - * in case the transformation could not be prepared (initialized) - */ - public void prepareExecution( String[] arguments ) throws KettleException { - preparing = true; - startDate = null; - running = false; - - log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_START ); - log.snap( Metrics.METRIC_TRANSFORMATION_INIT_START ); - - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationPrepareExecution.id, this ); - - checkCompatibility(); - - // Set the arguments on the transformation... 
- // - if ( arguments != null ) { - setArguments( arguments ); - } - - activateParameters(); - transMeta.activateParameters(); - - if ( transMeta.getName() == null ) { - if ( transMeta.getFilename() != null ) { - log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForFilename", transMeta - .getFilename() ) ); - } - } else { - log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta - .getName() ) ); - } - - if ( getArguments() != null ) { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NumberOfArgumentsDetected", String.valueOf( - getArguments().length ) ) ); - } - } - - if ( isSafeModeEnabled() ) { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName() ) ); - } - } - - if ( getReplayDate() != null ) { - SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT ); - log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ThisIsAReplayTransformation" ) + df.format( - getReplayDate() ) ); - } else { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.ThisIsNotAReplayTransformation" ) ); - } - } - - // setInternalKettleVariables(this); --> Let's not do this, when running - // without file, for example remote, it spoils the fun - - // extra check to see if the servlet print writer has some value in case - // folks want to test it locally... - // - if ( servletPrintWriter == null ) { - String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null ); - if ( encoding == null ) { - servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) ); - } else { - try { - servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out, encoding ) ); - } catch ( UnsupportedEncodingException ex ) { - servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) ); + // If we don't have arguments set and TransMeta has, someone has been using a deprecated method. + // + if (transMeta.getArguments() != null && getArguments() == null) { + setArguments(transMeta.getArguments()); } - } } - // Keep track of all the row sets and allocated steps - // - steps = new ArrayList(); - rowsets = new ArrayList(); + /** + * Starts the threads prepared by prepareThreads(). Before you start the threads, you can add RowListeners to them. + * + * @throws KettleException if there is a communication error with a remote output socket. + */ + public void startThreads() throws KettleException { + // Now prepare to start all the threads... + // + nrOfFinishedSteps = 0; + nrOfActiveSteps = 0; - List hopsteps = transMeta.getTransHopSteps( false ); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationStartThreads.id, this); - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDefferentSteps", String.valueOf( hopsteps - .size() ) ) ); - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingRowsets" ) ); - } - // First allocate all the rowsets required! - // Note that a mapping doesn't receive ANY input or output rowsets... - // - for ( int i = 0; i < hopsteps.size(); i++ ) { - StepMeta thisStep = hopsteps.get( i ); - if ( thisStep.isMapping() ) { - continue; // handled and allocated by the mapping step itself. 
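The copy counts of the two hopped steps drive both the dispatch-type constant chosen earlier and the number of row sets allocated between them; only the N-to-M case needs a row set for every source/target pair. A compact, illustrative restatement of that allocation rule (not part of the original source):

    class RowsetAllocationSketch {
        // How many row sets does prepareExecution() allocate between two hopped steps?
        static int rowsetsBetween(int thisCopies, int nextCopies, boolean repartitioning) {
            if (thisCopies == 1 && nextCopies == 1) {
                return 1;                        // TYPE_DISP_1_1
            } else if (thisCopies == 1) {
                return nextCopies;               // TYPE_DISP_1_N
            } else if (nextCopies == 1) {
                return thisCopies;               // TYPE_DISP_N_1
            } else if (thisCopies == nextCopies && !repartitioning) {
                return nextCopies;               // TYPE_DISP_N_N: copy c only feeds copy c
            } else {
                return thisCopies * nextCopies;  // TYPE_DISP_N_M: every source copy feeds every target copy
            }
        }
    }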
- } + fireTransStartedListeners(); - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf( i ), - thisStep.getName() ) ); - } + for (int i = 0; i < steps.size(); i++) { + final StepMetaDataCombi sid = steps.get(i); + sid.step.markStart(); + sid.step.initBeforeStart(); - List nextSteps = transMeta.findNextSteps( thisStep ); - int nrTargets = nextSteps.size(); + // also attach a Step Listener to detect when we're done... + // + StepListener stepListener = new StepListener() { + public void stepActive(Trans trans, StepMeta stepMeta, StepInterface step) { + nrOfActiveSteps++; + if (nrOfActiveSteps == 1) { + // Transformation goes from in-active to active... + // PDI-5229 sync added + synchronized (transListeners) { + for (TransListener listener : transListeners) { + listener.transActive(Trans.this); + } + } + } + } - for ( int n = 0; n < nrTargets; n++ ) { - // What's the next step? - StepMeta nextStep = nextSteps.get( n ); - if ( nextStep.isMapping() ) { - continue; // handled and allocated by the mapping step itself. + public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) { + synchronized (Trans.this) { + nrOfFinishedSteps++; + + if (nrOfFinishedSteps >= steps.size()) { + // Set the finished flag + // + setFinished(true); + + // Grab the performance statistics one last time (if enabled) + // + addStepPerformanceSnapShot(); + + try { + fireTransFinishedListeners(); + } catch (Exception e) { + step.setErrors(step.getErrors() + 1L); + log.logError(getName() + " : " + BaseMessages.getString(PKG, + "Trans.Log.UnexpectedErrorAtTransformationEnd"), e); + } + } + + // If a step fails with an error, we want to kill/stop the others + // too... + // + if (step.getErrors() > 0) { + + log.logMinimal(BaseMessages.getString(PKG, "Trans.Log.TransformationDetectedErrors")); + log.logMinimal(BaseMessages.getString(PKG, "Trans.Log.TransformationIsKillingTheOtherSteps")); + + killAllNoWait(); + } + } + } + }; + // Make sure this is called first! + // + if (sid.step instanceof BaseStep) { + ((BaseStep) sid.step).getStepListeners().add(0, stepListener); + } else { + sid.step.addStepListener(stepListener); + } } - // How many times do we start the source step? - int thisCopies = thisStep.getCopies(); + if (transMeta.isCapturingStepPerformanceSnapShots()) { + stepPerformanceSnapshotSeqNr = new AtomicInteger(0); + stepPerformanceSnapShots = new ConcurrentHashMap>(); - if ( thisCopies < 0 ) { - // This can only happen if a variable is used that didn't resolve to a positive integer value - // - throw new KettleException( BaseMessages.getString( PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep - .getName() ) ); + // Calculate the maximum number of snapshots to be kept in memory + // + String limitString = environmentSubstitute(transMeta.getStepPerformanceCapturingSizeLimit()); + if (Const.isEmpty(limitString)) { + limitString = EnvUtil.getSystemProperty(Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT); + } + stepPerformanceSnapshotSizeLimit = Const.toInt(limitString, 0); + + // Set a timer to collect the performance data from the running threads... 
+ // + stepPerformanceSnapShotTimer = new Timer("stepPerformanceSnapShot Timer: " + transMeta.getName()); + TimerTask timerTask = new TimerTask() { + public void run() { + if (!isFinished()) { + addStepPerformanceSnapShot(); + } + } + }; + stepPerformanceSnapShotTimer.schedule(timerTask, 100, transMeta.getStepPerformanceCapturingDelay()); } - // How many times do we start the target step? - int nextCopies = nextStep.getCopies(); + // Now start a thread to monitor the running transformation... + // + setFinished(false); + paused.set(false); + stopped.set(false); - // Are we re-partitioning? - boolean repartitioning; - if ( thisStep.isPartitioned() ) { - repartitioning = !thisStep.getStepPartitioningMeta().equals( nextStep.getStepPartitioningMeta() ); - } else { - repartitioning = nextStep.isPartitioned(); - } - - int nrCopies; - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.copiesInfo", String.valueOf( thisCopies ), String - .valueOf( nextCopies ) ) ); - } - int dispatchType; - if ( thisCopies == 1 && nextCopies == 1 ) { - dispatchType = TYPE_DISP_1_1; - nrCopies = 1; - } else if ( thisCopies == 1 && nextCopies > 1 ) { - dispatchType = TYPE_DISP_1_N; - nrCopies = nextCopies; - } else if ( thisCopies > 1 && nextCopies == 1 ) { - dispatchType = TYPE_DISP_N_1; - nrCopies = thisCopies; - } else if ( thisCopies == nextCopies && !repartitioning ) { - dispatchType = TYPE_DISP_N_N; - nrCopies = nextCopies; - } else { - // > 1! - dispatchType = TYPE_DISP_N_M; - nrCopies = nextCopies; - } // Allocate a rowset for each destination step + transFinishedBlockingQueue = new ArrayBlockingQueue(10); - // Allocate the rowsets - // - if ( dispatchType != TYPE_DISP_N_M ) { - for ( int c = 0; c < nrCopies; c++ ) { - RowSet rowSet; - switch ( transMeta.getTransformationType() ) { - case Normal: - // This is a temporary patch until the batching rowset has proven - // to be working in all situations. - // Currently there are stalling problems when dealing with small - // amounts of rows. + TransListener transListener = new TransAdapter() { + public void transFinished(Trans trans) { + + try { + shutdownHeartbeat(trans != null ? trans.heartbeat : null); + + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationFinish.id, trans); + } catch (KettleException e) { + throw new RuntimeException("Error calling extension point at end of transformation", e); + } + + // First of all, stop the performance snapshot timer if there is is + // one... // - Boolean batchingRowSet = - ValueMeta.convertStringToBoolean( System.getProperty( Const.KETTLE_BATCHING_ROWSET ) ); - if ( batchingRowSet != null && batchingRowSet.booleanValue() ) { - rowSet = new BlockingBatchingRowSet( transMeta.getSizeRowset() ); - } else { - rowSet = new BlockingRowSet( transMeta.getSizeRowset() ); + if (transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null) { + stepPerformanceSnapShotTimer.cancel(); } - break; - case SerialSingleThreaded: - rowSet = new SingleRowRowSet(); - break; + setFinished(true); + running = false; // no longer running - case SingleThreaded: - rowSet = new QueueRowSet(); - break; + log.snap(Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP); + + // If the user ran with metrics gathering enabled and a metrics logging table is configured, add another + // listener... 
+ // + MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable(); + if (metricsLogTable.isDefined()) { + try { + writeMetricsInformation(); + } catch (Exception e) { + log.logError("Error writing metrics information", e); + errors.incrementAndGet(); + } + } - default: - throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() ); + // Close the unique connections when running database transactionally. + // This will commit or roll back the transaction based on the result of this transformation. + // + if (transMeta.isUsingUniqueConnections()) { + trans.closeUniqueDatabaseConnections(getResult()); + } } + }; + // This should always be done first so that the other listeners achieve a clean state to start from (setFinished and + // so on) + // + transListeners.add(0, transListener); - switch ( dispatchType ) { - case TYPE_DISP_1_1: - rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), 0 ); - break; - case TYPE_DISP_1_N: - rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), c ); + running = true; + + switch (transMeta.getTransformationType()) { + case Normal: + + // Now start all the threads... + // + for (int i = 0; i < steps.size(); i++) { + final StepMetaDataCombi combi = steps.get(i); + RunThread runThread = new RunThread(combi); + Thread thread = new Thread(runThread); + thread.setName(getName() + " - " + combi.stepname); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepBeforeStart.id, combi); + // Call an extension point at the end of the step + // + combi.step.addStepListener(new StepAdapter() { + + @Override + public void stepFinished(Trans trans, StepMeta stepMeta, StepInterface step) { + try { + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.StepFinished.id, combi); + } catch (KettleException e) { + throw new RuntimeException("Unexpected error in calling extension point upon step finish", e); + } + } + + }); + + thread.start(); + } break; - case TYPE_DISP_N_1: - rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), 0 ); + + case SerialSingleThreaded: + new Thread(new Runnable() { + public void run() { + try { + // Always disable thread priority management, it will always slow us + // down... + // + for (StepMetaDataCombi combi : steps) { + combi.step.setUsingThreadPriorityManagment(false); + } + + // + // This is a single threaded version... + // + + // Sort the steps from start to finish... 
+ // + Collections.sort(steps, new Comparator() { + public int compare(StepMetaDataCombi c1, StepMetaDataCombi c2) { + + boolean c1BeforeC2 = transMeta.findPrevious(c2.stepMeta, c1.stepMeta); + if (c1BeforeC2) { + return -1; + } else { + return 1; + } + } + }); + + boolean[] stepDone = new boolean[steps.size()]; + int nrDone = 0; + while (nrDone < steps.size() && !isStopped()) { + for (int i = 0; i < steps.size() && !isStopped(); i++) { + StepMetaDataCombi combi = steps.get(i); + if (!stepDone[i]) { + // if (combi.step.canProcessOneRow() || + // !combi.step.isRunning()) { + boolean cont = combi.step.processRow(combi.meta, combi.data); + if (!cont) { + stepDone[i] = true; + nrDone++; + } + // } + } + } + } + } catch (Exception e) { + errors.addAndGet(1); + log.logError("Error executing single threaded", e); + } finally { + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi combi = steps.get(i); + combi.step.dispose(combi.meta, combi.data); + combi.step.markStop(); + } + } + } + }).start(); break; - case TYPE_DISP_N_N: - rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), c ); + + case SingleThreaded: + // Don't do anything, this needs to be handled by the transformation + // executor! + // break; - default: + default: break; - } - rowsets.add( rowSet ); - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet - .toString() ) ); - } - } - } else { - // For each N source steps we have M target steps - // - // From each input step we go to all output steps. - // This allows maximum flexibility for re-partitioning, - // distribution... - for ( int s = 0; s < thisCopies; s++ ) { - for ( int t = 0; t < nextCopies; t++ ) { - BlockingRowSet rowSet = new BlockingRowSet( transMeta.getSizeRowset() ); - rowSet.setThreadNameFromToCopy( thisStep.getName(), s, nextStep.getName(), t ); - rowsets.add( rowSet ); - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet - .toString() ) ); - } - } - } - } - } - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatedRowsets", String.valueOf( rowsets.size() ), - String.valueOf( i ), thisStep.getName() ) + " " ); - } - - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingStepsAndStepData" ) ); - } - - // Allocate the steps & the data... - // - for ( int i = 0; i < hopsteps.size(); i++ ) { - StepMeta stepMeta = hopsteps.get( i ); - String stepid = stepMeta.getStepID(); - - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), - stepid ) ); - } - - // How many copies are launched of this step? - int nrCopies = stepMeta.getCopies(); - - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf( nrCopies ) ) ); - } - - // At least run once... - for ( int c = 0; c < nrCopies; c++ ) { - // Make sure we haven't started it yet! 
- if ( !hasStepStarted( stepMeta.getName(), c ) ) { - StepMetaDataCombi combi = new StepMetaDataCombi(); - - combi.stepname = stepMeta.getName(); - combi.copy = c; - - // The meta-data - combi.stepMeta = stepMeta; - combi.meta = stepMeta.getStepMetaInterface(); - - // Allocate the step data - StepDataInterface data = combi.meta.getStepData(); - combi.data = data; - - // Allocate the step - StepInterface step = combi.meta.getStep( stepMeta, data, c, transMeta, this ); - - // Copy the variables of the transformation to the step... - // don't share. Each copy of the step has its own variables. - // - step.initializeVariablesFrom( this ); - step.setUsingThreadPriorityManagment( transMeta.isUsingThreadPriorityManagment() ); - - // Pass the connected repository & metaStore to the steps runtime - // - step.setRepository( repository ); - step.setMetaStore( metaStore ); - - // If the step is partitioned, set the partitioning ID and some other - // things as well... - if ( stepMeta.isPartitioned() ) { - List partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs(); - if ( partitionIDs != null && partitionIDs.size() > 0 ) { - step.setPartitionID( partitionIDs.get( c ) ); // Pass the partition ID - // to the step - } - } - - // Save the step too - combi.step = step; - - // Pass logging level and metrics gathering down to the step level. - // / - if ( combi.step instanceof LoggingObjectInterface ) { - LogChannelInterface logChannel = combi.step.getLogChannel(); - logChannel.setLogLevel( logLevel ); - logChannel.setGatheringMetrics( log.isGatheringMetrics() ); - } - - // Add to the bunch... - steps.add( combi ); - - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta - .getName(), String.valueOf( c ) ) ); - } - } - } - } - - // Now we need to verify if certain rowsets are not meant to be for error - // handling... - // Loop over the steps and for every step verify the output rowsets - // If a rowset is going to a target step in the steps error handling - // metadata, set it to the errorRowSet. - // The input rowsets are already in place, so the next step just accepts the - // rows. - // Metadata wise we need to do the same trick in TransMeta - // - for ( int s = 0; s < steps.size(); s++ ) { - StepMetaDataCombi combi = steps.get( s ); - if ( combi.stepMeta.isDoingErrorHandling() ) { - combi.step.identifyErrorOutput(); - - } - } - - // Now (optionally) write start log record! - // Make sure we synchronize appropriately to avoid duplicate batch IDs. - // - Object syncObject = this; - if ( parentJob != null ) { - syncObject = parentJob; // parallel execution in a job - } - if ( parentTrans != null ) { - syncObject = parentTrans; // multiple sub-transformations - } - synchronized ( syncObject ) { - calculateBatchIdAndDateRange(); - beginProcessing(); - } - - // Set the partition-to-rowset mapping - // - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - - StepMeta stepMeta = sid.stepMeta; - StepInterface baseStep = sid.step; - - baseStep.setPartitioned( stepMeta.isPartitioned() ); - - // Now let's take a look at the source and target relation - // - // If this source step is not partitioned, and the target step is: it - // means we need to re-partition the incoming data. 
- // If both steps are partitioned on the same method and schema, we don't - // need to re-partition - // If both steps are partitioned on a different method or schema, we need - // to re-partition as well. - // If both steps are not partitioned, we don't need to re-partition - // - boolean isThisPartitioned = stepMeta.isPartitioned(); - PartitionSchema thisPartitionSchema = null; - if ( isThisPartitioned ) { - thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema(); - } - - boolean isNextPartitioned = false; - StepPartitioningMeta nextStepPartitioningMeta = null; - PartitionSchema nextPartitionSchema = null; - - List nextSteps = transMeta.findNextSteps( stepMeta ); - int nrNext = nextSteps.size(); - for ( int p = 0; p < nrNext; p++ ) { - StepMeta nextStep = nextSteps.get( p ); - if ( nextStep.isPartitioned() ) { - isNextPartitioned = true; - nextStepPartitioningMeta = nextStep.getStepPartitioningMeta(); - nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema(); - } - } - - baseStep.setRepartitioning( StepPartitioningMeta.PARTITIONING_METHOD_NONE ); - - // If the next step is partitioned differently, set re-partitioning, when - // running locally. - // - if ( ( !isThisPartitioned && isNextPartitioned ) || ( isThisPartitioned && isNextPartitioned - && !thisPartitionSchema.equals( nextPartitionSchema ) ) ) { - baseStep.setRepartitioning( nextStepPartitioningMeta.getMethodType() ); - } - - // For partitioning to a set of remove steps (repartitioning from a master - // to a set or remote output steps) - // - StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta(); - if ( targetStepPartitioningMeta != null ) { - baseStep.setRepartitioning( targetStepPartitioningMeta.getMethodType() ); - } - } - - preparing = false; - initializing = true; - - // Do a topology sort... Over 150 step (copies) things might be slowing down too much. - // - if ( isMonitored() && steps.size() < 150 ) { - doTopologySortOfSteps(); - } - - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.InitialisingSteps", String.valueOf( steps.size() ) ) ); - } - - StepInitThread[] initThreads = new StepInitThread[steps.size()]; - Thread[] threads = new Thread[steps.size()]; - - // Initialize all the threads... - // - for ( int i = 0; i < steps.size(); i++ ) { - final StepMetaDataCombi sid = steps.get( i ); - - // Do the init code in the background! - // Init all steps at once, but ALL steps need to finish before we can - // continue properly! - // - initThreads[i] = new StepInitThread( sid, log ); - - // Put it in a separate thread! - // - threads[i] = new Thread( initThreads[i] ); - threads[i].setName( "init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")" ); - - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i] ); - - threads[i].start(); - } - - for ( int i = 0; i < threads.length; i++ ) { - try { - threads[i].join(); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i] ); - } catch ( Exception ex ) { - log.logError( "Error with init thread: " + ex.getMessage(), ex.getMessage() ); - log.logError( Const.getStackTracker( ex ) ); - } - } - - initializing = false; - boolean ok = true; - - // All step are initialized now: see if there was one that didn't do it - // correctly! 
- // - for ( int i = 0; i < initThreads.length; i++ ) { - StepMetaDataCombi combi = initThreads[i].getCombi(); - if ( !initThreads[i].isOk() ) { - log.logError( BaseMessages.getString( PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." + combi.copy ) ); - combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED ); - ok = false; - } else { - combi.data.setStatus( StepExecutionStatus.STATUS_IDLE ); - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StepInitialized", combi.stepname + "." - + combi.copy ) ); - } - } - } - - if ( !ok ) { - // Halt the other threads as well, signal end-of-the line to the outside - // world... - // Also explicitly call dispose() to clean up resources opened during - // init(); - // - for ( int i = 0; i < initThreads.length; i++ ) { - StepMetaDataCombi combi = initThreads[i].getCombi(); - // Dispose will overwrite the status, but we set it back right after - // this. - combi.step.dispose( combi.meta, combi.data ); + } - if ( initThreads[i].isOk() ) { - combi.data.setStatus( StepExecutionStatus.STATUS_HALTED ); - } else { - combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED ); - } - } - - // Just for safety, fire the trans finished listeners... - try { - fireTransFinishedListeners(); - } catch ( KettleException e ) { - // listeners produces errors - log.logError( BaseMessages.getString( PKG, "Trans.FinishListeners.Exception" ) ); - // we will not pass this exception up to prepareExecuton() entry point. - } finally { - // Flag the transformation as finished even if exception was thrown - setFinished( true ); - } - - // Pass along the log during preview. Otherwise it becomes hard to see - // what went wrong. - // - if ( preview ) { - String logText = KettleLogStore.getAppender().getBuffer( getLogChannelId(), true ).toString(); - throw new KettleException( BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR - + logText ); - } else { - throw new KettleException( BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) - + Const.CR ); - } - } - - log.snap( Metrics.METRIC_TRANSFORMATION_INIT_STOP ); - - KettleEnvironment.setExecutionInformation( this, repository ); - - readyToStart = true; - } - - @SuppressWarnings( "deprecation" ) - private void checkCompatibility() { - // If we don't have a previous result and transMeta does have one, someone has been using a deprecated method. - // - if ( transMeta.getPreviousResult() != null && getPreviousResult() == null ) { - setPreviousResult( transMeta.getPreviousResult() ); - } - - // If we don't have arguments set and TransMeta has, someone has been using a deprecated method. - // - if ( transMeta.getArguments() != null && getArguments() == null ) { - setArguments( transMeta.getArguments() ); - } - } - - /** - * Starts the threads prepared by prepareThreads(). Before you start the threads, you can add RowListeners to them. - * - * @throws KettleException - * if there is a communication error with a remote output socket. - */ - public void startThreads() throws KettleException { - // Now prepare to start all the threads... - // - nrOfFinishedSteps = 0; - nrOfActiveSteps = 0; - - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this ); - - fireTransStartedListeners(); - - for ( int i = 0; i < steps.size(); i++ ) { - final StepMetaDataCombi sid = steps.get( i ); - sid.step.markStart(); - sid.step.initBeforeStart(); - - // also attach a Step Listener to detect when we're done... 
- // - StepListener stepListener = new StepListener() { - public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) { - nrOfActiveSteps++; - if ( nrOfActiveSteps == 1 ) { - // Transformation goes from in-active to active... - // PDI-5229 sync added - synchronized ( transListeners ) { - for ( TransListener listener : transListeners ) { - listener.transActive( Trans.this ); - } - } - } - } - - public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) { - synchronized ( Trans.this ) { - nrOfFinishedSteps++; - - if ( nrOfFinishedSteps >= steps.size() ) { - // Set the finished flag - // - setFinished( true ); - - // Grab the performance statistics one last time (if enabled) - // - addStepPerformanceSnapShot(); - - try { - fireTransFinishedListeners(); - } catch ( Exception e ) { - step.setErrors( step.getErrors() + 1L ); - log.logError( getName() + " : " + BaseMessages.getString( PKG, - "Trans.Log.UnexpectedErrorAtTransformationEnd" ), e ); - } - } + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationStart.id, this); - // If a step fails with an error, we want to kill/stop the others - // too... - // - if ( step.getErrors() > 0 ) { + heartbeat = startHeartbeat(getHeartbeatIntervalInSeconds()); - log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) ); - log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) ); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationHasAllocated", String.valueOf(steps + .size()), String.valueOf(rowsets.size()))); + } + } - killAllNoWait(); + /** + * Make attempt to fire all registered listeners if possible. + * + * @throws KettleException if any errors occur during notification + */ + protected void fireTransFinishedListeners() throws KettleException { + // PDI-5229 sync added + synchronized (transListeners) { + if (transListeners.size() == 0) { + return; + } + // prevent Exception from one listener to block others execution + List badGuys = new ArrayList(transListeners.size()); + for (TransListener transListener : transListeners) { + try { + transListener.transFinished(this); + } catch (KettleException e) { + badGuys.add(e); + } + } + // Signal for the the waitUntilFinished blocker... + transFinishedBlockingQueue.add(new Object()); + if (!badGuys.isEmpty()) { + // FIFO + throw new KettleException(badGuys.get(0)); } - } } - }; - // Make sure this is called first! - // - if ( sid.step instanceof BaseStep ) { - ( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener ); - } else { - sid.step.addStepListener( stepListener ); - } } - if ( transMeta.isCapturingStepPerformanceSnapShots() ) { - stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 ); - stepPerformanceSnapShots = new ConcurrentHashMap>(); - - // Calculate the maximum number of snapshots to be kept in memory - // - String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() ); - if ( Const.isEmpty( limitString ) ) { - limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT ); - } - stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 ); - - // Set a timer to collect the performance data from the running threads... 
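The reworked fireTransFinishedListeners() above walks every registered TransListener, collects any KettleException instead of letting the first one abort the loop, unblocks waitUntilFinished() through transFinishedBlockingQueue, and only then rethrows the first failure. A minimal, self-contained sketch of that notify-all-then-rethrow pattern; the Listener interface and the demo class are illustrative stand-ins, not Kettle types:

    import java.util.ArrayList;
    import java.util.List;

    public class ListenerNotifyDemo {
        interface Listener {
            void finished() throws Exception;
        }

        public static void main(String[] args) throws Exception {
            List<Listener> listeners = new ArrayList<Listener>();
            listeners.add(new Listener() {
                public void finished() { System.out.println("listener 1 ran"); }
            });
            listeners.add(new Listener() {
                public void finished() throws Exception { throw new Exception("listener 2 failed"); }
            });
            listeners.add(new Listener() {
                public void finished() { System.out.println("listener 3 still ran"); }
            });

            // Notify everyone first; one failing listener must not block the others.
            List<Exception> failures = new ArrayList<Exception>();
            synchronized (listeners) {
                for (Listener l : listeners) {
                    try {
                        l.finished();
                    } catch (Exception e) {
                        failures.add(e);
                    }
                }
            }
            // Only after all listeners ran, rethrow the first failure (FIFO), as above.
            if (!failures.isEmpty()) {
                throw failures.get(0);
            }
        }
    }

Running it prints the output of listeners 1 and 3 before the exception from listener 2 surfaces, which is the behaviour the collected-exceptions list buys.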
- // - stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() ); - TimerTask timerTask = new TimerTask() { - public void run() { - if ( !isFinished() ) { - addStepPerformanceSnapShot(); - } + /** + * Fires the start-event listeners (if any are registered). + * + * @throws KettleException if any errors occur during notification + */ + protected void fireTransStartedListeners() throws KettleException { + // PDI-5229 sync added + synchronized (transListeners) { + for (TransListener transListener : transListeners) { + transListener.transStarted(this); + } } - }; - stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() ); } - // Now start a thread to monitor the running transformation... - // - setFinished( false ); - paused.set( false ); - stopped.set( false ); + /** + * Adds a step performance snapshot. + */ + protected void addStepPerformanceSnapShot() { - transFinishedBlockingQueue = new ArrayBlockingQueue( 10 ); + if (stepPerformanceSnapShots == null) { + return; // Race condition somewhere? + } - TransListener transListener = new TransAdapter() { - public void transFinished( Trans trans ) { + boolean pausedAndNotEmpty = isPaused() && !stepPerformanceSnapShots.isEmpty(); + boolean stoppedAndNotEmpty = isStopped() && !stepPerformanceSnapShots.isEmpty(); - try { - shutdownHeartbeat( trans != null ? trans.heartbeat : null ); + if (transMeta.isCapturingStepPerformanceSnapShots() && !pausedAndNotEmpty && !stoppedAndNotEmpty) { + // get the statistics from the steps and keep them... + // + int seqNr = stepPerformanceSnapshotSeqNr.incrementAndGet(); + for (int i = 0; i < steps.size(); i++) { + StepMeta stepMeta = steps.get(i).stepMeta; + StepInterface step = steps.get(i).step; + + StepPerformanceSnapShot snapShot = + new StepPerformanceSnapShot(seqNr, getBatchId(), new Date(), getName(), stepMeta.getName(), step.getCopy(), + step.getLinesRead(), step.getLinesWritten(), step.getLinesInput(), step.getLinesOutput(), step + .getLinesUpdated(), step.getLinesRejected(), step.getErrors()); + List snapShotList = stepPerformanceSnapShots.get(step.toString()); + StepPerformanceSnapShot previous; + if (snapShotList == null) { + snapShotList = new ArrayList(); + stepPerformanceSnapShots.put(step.toString(), snapShotList); + previous = null; + } else { + previous = snapShotList.get(snapShotList.size() - 1); // the last one... + } + // Make the difference... + // + snapShot.diff(previous, step.rowsetInputSize(), step.rowsetOutputSize()); + synchronized (stepPerformanceSnapShots) { + snapShotList.add(snapShot); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans ); - } catch ( KettleException e ) { - throw new RuntimeException( "Error calling extension point at end of transformation", e ); - } + if (stepPerformanceSnapshotSizeLimit > 0 && snapShotList.size() > stepPerformanceSnapshotSizeLimit) { + snapShotList.remove(0); + } + } + } - // First of all, stop the performance snapshot timer if there is is - // one... 
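addStepPerformanceSnapShot() above is driven by a Timer, and when stepPerformanceSnapshotSizeLimit is positive it drops the oldest snapshot once a step's list grows past the limit. A small stand-alone sketch of that bounded, timer-driven sampling; the class name, the limit of 5 and the 100 ms period are illustrative assumptions, not values taken from the patch:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Timer;
    import java.util.TimerTask;

    public class BoundedSamplingDemo {
        public static void main(String[] args) throws InterruptedException {
            final List<Long> samples = new ArrayList<Long>();
            final int sizeLimit = 5; // illustrative; the patch reads the limit from a variable/system property

            Timer timer = new Timer("sample timer");
            timer.schedule(new TimerTask() {
                public void run() {
                    synchronized (samples) {
                        samples.add(System.currentTimeMillis());
                        // A limit of 0 (or less) means unbounded; otherwise keep only the newest entries.
                        if (sizeLimit > 0 && samples.size() > sizeLimit) {
                            samples.remove(0); // drop the oldest sample
                        }
                    }
                }
            }, 100, 100); // first run after 100 ms, then every 100 ms

            Thread.sleep(1000);
            timer.cancel(); // the transformation cancels its snapshot timer from a finished-listener
            synchronized (samples) {
                System.out.println("kept " + samples.size() + " samples (limit " + sizeLimit + ")");
            }
        }
    }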
- // - if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) { - stepPerformanceSnapShotTimer.cancel(); + lastStepPerformanceSnapshotSeqNrAdded = stepPerformanceSnapshotSeqNr.get(); } + } - setFinished( true ); - running = false; // no longer running - - log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP ); - - // If the user ran with metrics gathering enabled and a metrics logging table is configured, add another - // listener... + /** + * This method performs any cleanup operations, typically called after the transformation has finished. Specifically, + * after ALL the slave transformations in a clustered run have finished. + */ + public void cleanup() { + // Close all open server sockets. + // We can only close these after all processing has been confirmed to be finished. // - MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable(); - if ( metricsLogTable.isDefined() ) { - try { - writeMetricsInformation(); - } catch ( Exception e ) { - log.logError( "Error writing metrics information", e ); - errors.incrementAndGet(); - } + if (steps == null) { + return; } - // Close the unique connections when running database transactionally. - // This will commit or roll back the transaction based on the result of this transformation. - // - if ( transMeta.isUsingUniqueConnections() ) { - trans.closeUniqueDatabaseConnections( getResult() ); + for (StepMetaDataCombi combi : steps) { + combi.step.cleanup(); } - } - }; - // This should always be done first so that the other listeners achieve a clean state to start from (setFinished and - // so on) - // - transListeners.add( 0, transListener ); + } + + /** + * Logs a summary message for the specified step. + * + * @param si the step interface + */ + public void logSummary(StepInterface si) { + log.logBasic(si.getStepname(), BaseMessages.getString(PKG, "Trans.Log.FinishedProcessing", String.valueOf(si + .getLinesInput()), String.valueOf(si.getLinesOutput()), String.valueOf(si.getLinesRead())) + BaseMessages + .getString(PKG, "Trans.Log.FinishedProcessing2", String.valueOf(si.getLinesWritten()), String.valueOf(si + .getLinesUpdated()), String.valueOf(si.getErrors()))); + } - running = true; + /** + * Waits until all RunThreads have finished. + */ + public void waitUntilFinished() { + try { + if (transFinishedBlockingQueue == null) { + return; + } + boolean wait = true; + while (wait) { + wait = transFinishedBlockingQueue.poll(1, TimeUnit.DAYS) == null; + } + } catch (InterruptedException e) { + throw new RuntimeException("Waiting for transformation to be finished interrupted!", e); + } + } - switch ( transMeta.getTransformationType() ) { - case Normal: + /** + * Gets the number of errors that have occurred during execution of the transformation. + * + * @return the number of errors + */ + public int getErrors() { + int nrErrors = errors.get(); - // Now start all the threads... 
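waitUntilFinished() above blocks by polling transFinishedBlockingQueue until fireTransFinishedListeners() drops a token into it. A compact sketch of that signal-through-a-queue handshake; the worker thread and the one-second poll are illustrative (the patch polls with a one-day timeout in a loop):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class FinishSignalDemo {
        public static void main(String[] args) throws InterruptedException {
            final BlockingQueue<Object> finishedQueue = new ArrayBlockingQueue<Object>(10);

            // Worker: does its job, then signals completion by adding a token.
            Thread worker = new Thread(new Runnable() {
                public void run() {
                    try {
                        Thread.sleep(200); // simulated work
                    } catch (InterruptedException ignored) {
                        Thread.currentThread().interrupt();
                    }
                    finishedQueue.add(new Object()); // same idea as the add() in fireTransFinishedListeners()
                }
            });
            worker.start();

            // Waiter: poll with a timeout in a loop until a token shows up.
            boolean wait = true;
            while (wait) {
                wait = finishedQueue.poll(1, TimeUnit.SECONDS) == null;
            }
            System.out.println("worker finished");
        }
    }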
- // - for ( int i = 0; i < steps.size(); i++ ) { - final StepMetaDataCombi combi = steps.get( i ); - RunThread runThread = new RunThread( combi ); - Thread thread = new Thread( runThread ); - thread.setName( getName() + " - " + combi.stepname ); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi ); - // Call an extension point at the end of the step - // - combi.step.addStepListener( new StepAdapter() { + if (steps == null) { + return nrErrors; + } - @Override - public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) { - try { - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi ); - } catch ( KettleException e ) { - throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e ); - } + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + if (sid.step.getErrors() != 0L) { + nrErrors += sid.step.getErrors(); } + } + if (nrErrors > 0) { + log.logError(BaseMessages.getString(PKG, "Trans.Log.TransformationErrorsDetected")); + } + + return nrErrors; + } - } ); + /** + * Gets the number of steps in the transformation that are in an end state, such as Finished, Halted, or Stopped. + * + * @return the number of ended steps + */ + public int getEnded() { + int nrEnded = 0; - thread.start(); + if (steps == null) { + return 0; } - break; - case SerialSingleThreaded: - new Thread( new Runnable() { - public void run() { - try { - // Always disable thread priority management, it will always slow us - // down... - // - for ( StepMetaDataCombi combi : steps ) { - combi.step.setUsingThreadPriorityManagment( false ); - } - - // - // This is a single threaded version... - // - - // Sort the steps from start to finish... - // - Collections.sort( steps, new Comparator() { - public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) { - - boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta ); - if ( c1BeforeC2 ) { - return -1; - } else { - return 1; - } - } - } ); - - boolean[] stepDone = new boolean[steps.size()]; - int nrDone = 0; - while ( nrDone < steps.size() && !isStopped() ) { - for ( int i = 0; i < steps.size() && !isStopped(); i++ ) { - StepMetaDataCombi combi = steps.get( i ); - if ( !stepDone[i] ) { - // if (combi.step.canProcessOneRow() || - // !combi.step.isRunning()) { - boolean cont = combi.step.processRow( combi.meta, combi.data ); - if ( !cont ) { - stepDone[i] = true; - nrDone++; - } - // } - } - } - } - } catch ( Exception e ) { - errors.addAndGet( 1 ); - log.logError( "Error executing single threaded", e ); - } finally { - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi combi = steps.get( i ); - combi.step.dispose( combi.meta, combi.data ); - combi.step.markStop(); - } - } - } - } ).start(); - break; - - case SingleThreaded: - // Don't do anything, this needs to be handled by the transformation - // executor! - // - break; - default: - break; + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepDataInterface data = sid.data; + + if ((sid.step != null && !sid.step.isRunning()) + // Should normally not be needed anymore, status is kept in data. 
+ || data.getStatus() == StepExecutionStatus.STATUS_FINISHED || // Finished processing + data.getStatus() == StepExecutionStatus.STATUS_HALTED || // Not launching because of init error + data.getStatus() == StepExecutionStatus.STATUS_STOPPED // Stopped because of an error + ) { + nrEnded++; + } + } + return nrEnded; } - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this ); + /** + * Checks if the transformation is finished\. + * + * @return true if the transformation is finished, false otherwise + */ + public boolean isFinished() { + return finished.get(); + } - heartbeat = startHeartbeat( getHeartbeatIntervalInSeconds() ); + private void setFinished(boolean newValue) { + finished.set(newValue); + } - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocated", String.valueOf( steps - .size() ), String.valueOf( rowsets.size() ) ) ); + public boolean isFinishedOrStopped() { + return isFinished() || isStopped(); } - } - /** - * Make attempt to fire all registered listeners if possible. - * - * @throws KettleException - * if any errors occur during notification - */ - protected void fireTransFinishedListeners() throws KettleException { - // PDI-5229 sync added - synchronized ( transListeners ) { - if ( transListeners.size() == 0 ) { - return; - } - // prevent Exception from one listener to block others execution - List badGuys = new ArrayList( transListeners.size() ); - for ( TransListener transListener : transListeners ) { - try { - transListener.transFinished( this ); - } catch ( KettleException e ) { - badGuys.add( e ); - } - } - // Signal for the the waitUntilFinished blocker... - transFinishedBlockingQueue.add( new Object() ); - if ( !badGuys.isEmpty() ) { - // FIFO - throw new KettleException( badGuys.get( 0 ) ); - } - } - } - - /** - * Fires the start-event listeners (if any are registered). - * - * @throws KettleException - * if any errors occur during notification - */ - protected void fireTransStartedListeners() throws KettleException { - // PDI-5229 sync added - synchronized ( transListeners ) { - for ( TransListener transListener : transListeners ) { - transListener.transStarted( this ); - } - } - } - - /** - * Adds a step performance snapshot. - */ - protected void addStepPerformanceSnapShot() { - - if ( stepPerformanceSnapShots == null ) { - return; // Race condition somewhere? - } - - boolean pausedAndNotEmpty = isPaused() && !stepPerformanceSnapShots.isEmpty(); - boolean stoppedAndNotEmpty = isStopped() && !stepPerformanceSnapShots.isEmpty(); - - if ( transMeta.isCapturingStepPerformanceSnapShots() && !pausedAndNotEmpty && !stoppedAndNotEmpty ) { - // get the statistics from the steps and keep them... 
- // - int seqNr = stepPerformanceSnapshotSeqNr.incrementAndGet(); - for ( int i = 0; i < steps.size(); i++ ) { - StepMeta stepMeta = steps.get( i ).stepMeta; - StepInterface step = steps.get( i ).step; - - StepPerformanceSnapShot snapShot = - new StepPerformanceSnapShot( seqNr, getBatchId(), new Date(), getName(), stepMeta.getName(), step.getCopy(), - step.getLinesRead(), step.getLinesWritten(), step.getLinesInput(), step.getLinesOutput(), step - .getLinesUpdated(), step.getLinesRejected(), step.getErrors() ); - List snapShotList = stepPerformanceSnapShots.get( step.toString() ); - StepPerformanceSnapShot previous; - if ( snapShotList == null ) { - snapShotList = new ArrayList(); - stepPerformanceSnapShots.put( step.toString(), snapShotList ); - previous = null; - } else { - previous = snapShotList.get( snapShotList.size() - 1 ); // the last one... + /** + * Attempts to stops all running steps and subtransformations. If all steps have finished, the transformation is + * marked as Finished. + */ + public void killAll() { + if (steps == null) { + return; } - // Make the difference... - // - snapShot.diff( previous, step.rowsetInputSize(), step.rowsetOutputSize() ); - synchronized ( stepPerformanceSnapShots ) { - snapShotList.add( snapShot ); - - if ( stepPerformanceSnapshotSizeLimit > 0 && snapShotList.size() > stepPerformanceSnapshotSizeLimit ) { - snapShotList.remove( 0 ); - } - } - } - - lastStepPerformanceSnapshotSeqNrAdded = stepPerformanceSnapshotSeqNr.get(); - } - } - - /** - * This method performs any cleanup operations, typically called after the transformation has finished. Specifically, - * after ALL the slave transformations in a clustered run have finished. - */ - public void cleanup() { - // Close all open server sockets. - // We can only close these after all processing has been confirmed to be finished. - // - if ( steps == null ) { - return; - } - - for ( StepMetaDataCombi combi : steps ) { - combi.step.cleanup(); - } - } - - /** - * Logs a summary message for the specified step. - * - * @param si - * the step interface - */ - public void logSummary( StepInterface si ) { - log.logBasic( si.getStepname(), BaseMessages.getString( PKG, "Trans.Log.FinishedProcessing", String.valueOf( si - .getLinesInput() ), String.valueOf( si.getLinesOutput() ), String.valueOf( si.getLinesRead() ) ) + BaseMessages - .getString( PKG, "Trans.Log.FinishedProcessing2", String.valueOf( si.getLinesWritten() ), String.valueOf( si - .getLinesUpdated() ), String.valueOf( si.getErrors() ) ) ); - } - - /** - * Waits until all RunThreads have finished. - */ - public void waitUntilFinished() { - try { - if ( transFinishedBlockingQueue == null ) { - return; - } - boolean wait = true; - while ( wait ) { - wait = transFinishedBlockingQueue.poll( 1, TimeUnit.DAYS ) == null; - } - } catch ( InterruptedException e ) { - throw new RuntimeException( "Waiting for transformation to be finished interrupted!", e ); - } - } - - /** - * Gets the number of errors that have occurred during execution of the transformation. 
- * - * @return the number of errors - */ - public int getErrors() { - int nrErrors = errors.get(); - - if ( steps == null ) { - return nrErrors; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - if ( sid.step.getErrors() != 0L ) { - nrErrors += sid.step.getErrors(); - } - } - if ( nrErrors > 0 ) { - log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrorsDetected" ) ); - } - - return nrErrors; - } - - /** - * Gets the number of steps in the transformation that are in an end state, such as Finished, Halted, or Stopped. - * - * @return the number of ended steps - */ - public int getEnded() { - int nrEnded = 0; - - if ( steps == null ) { - return 0; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepDataInterface data = sid.data; - - if ( ( sid.step != null && !sid.step.isRunning() ) - // Should normally not be needed anymore, status is kept in data. - || data.getStatus() == StepExecutionStatus.STATUS_FINISHED || // Finished processing - data.getStatus() == StepExecutionStatus.STATUS_HALTED || // Not launching because of init error - data.getStatus() == StepExecutionStatus.STATUS_STOPPED // Stopped because of an error - ) { - nrEnded++; - } - } - - return nrEnded; - } - - /** - * Checks if the transformation is finished\. - * - * @return true if the transformation is finished, false otherwise - */ - public boolean isFinished() { - return finished.get(); - } - - private void setFinished( boolean newValue ) { - finished.set( newValue ); - } - - public boolean isFinishedOrStopped() { - return isFinished() || isStopped(); - } - - /** - * Attempts to stops all running steps and subtransformations. If all steps have finished, the transformation is - * marked as Finished. - */ - public void killAll() { - if ( steps == null ) { - return; - } - - int nrStepsFinished = 0; - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + sid.step.getStepname() ); - } - - // If thr is a mapping, this is cause for an endless loop - // - while ( sid.step.isRunning() ) { - sid.step.stopAll(); - try { - Thread.sleep( 20 ); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() ); - return; - } - } - - if ( !sid.step.isRunning() ) { - nrStepsFinished++; - } - } - - if ( nrStepsFinished == steps.size() ) { - setFinished( true ); - } - } - - /** - * Asks all steps to stop but doesn't wait around for it to happen. This is a special method for use with mappings. - */ - private void killAllNoWait() { - if ( steps == null ) { - return; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface step = sid.step; - - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + step.getStepname() ); - } - - step.stopAll(); - try { - Thread.sleep( 20 ); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() ); - return; - } - } - } - - /** - * Logs the execution statistics for the transformation for the specified time interval. If the total length of - * execution is supplied as the interval, then the statistics represent the average throughput (lines - * read/written/updated/rejected/etc. per second) for the entire execution. 
- * - * @param seconds - * the time interval (in seconds) - */ - public void printStats( int seconds ) { - log.logBasic( " " ); - if ( steps == null ) { - return; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface step = sid.step; - long proc = step.getProcessed(); - if ( seconds != 0 ) { - if ( step.getErrors() == 0 ) { - log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step - .getCopy(), String.valueOf( proc ), String.valueOf( ( proc / seconds ) ) ) ); - } else { - log.logError( BaseMessages.getString( PKG, "Trans.Log.ProcessErrorInfo", step.getStepname(), "." + step - .getCopy(), String.valueOf( step.getErrors() ), String.valueOf( proc ), String.valueOf( proc - / seconds ) ) ); - } - } else { - if ( step.getErrors() == 0 ) { - log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step - .getCopy(), String.valueOf( proc ), seconds != 0 ? String.valueOf( ( proc / seconds ) ) : "-" ) ); + + int nrStepsFinished = 0; + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "Trans.Log.LookingAtStep") + sid.step.getStepname()); + } + + // If thr is a mapping, this is cause for an endless loop + // + while (sid.step.isRunning()) { + sid.step.stopAll(); + try { + Thread.sleep(20); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Log.TransformationErrors") + e.toString()); + return; + } + } + + if (!sid.step.isRunning()) { + nrStepsFinished++; + } + } + + if (nrStepsFinished == steps.size()) { + setFinished(true); + } + } + + /** + * Asks all steps to stop but doesn't wait around for it to happen. This is a special method for use with mappings. + */ + private void killAllNoWait() { + if (steps == null) { + return; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface step = sid.step; + + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "Trans.Log.LookingAtStep") + step.getStepname()); + } + + step.stopAll(); + try { + Thread.sleep(20); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Log.TransformationErrors") + e.toString()); + return; + } + } + } + + /** + * Logs the execution statistics for the transformation for the specified time interval. If the total length of + * execution is supplied as the interval, then the statistics represent the average throughput (lines + * read/written/updated/rejected/etc. per second) for the entire execution. + * + * @param seconds the time interval (in seconds) + */ + public void printStats(int seconds) { + log.logBasic(" "); + if (steps == null) { + return; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface step = sid.step; + long proc = step.getProcessed(); + if (seconds != 0) { + if (step.getErrors() == 0) { + log.logBasic(BaseMessages.getString(PKG, "Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step + .getCopy(), String.valueOf(proc), String.valueOf((proc / seconds)))); + } else { + log.logError(BaseMessages.getString(PKG, "Trans.Log.ProcessErrorInfo", step.getStepname(), "." 
+ step + .getCopy(), String.valueOf(step.getErrors()), String.valueOf(proc), String.valueOf(proc + / seconds))); + } + } else { + if (step.getErrors() == 0) { + log.logBasic(BaseMessages.getString(PKG, "Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step + .getCopy(), String.valueOf(proc), seconds != 0 ? String.valueOf((proc / seconds)) : "-")); + } else { + log.logError(BaseMessages.getString(PKG, "Trans.Log.ProcessErrorInfo2", step.getStepname(), "." + step + .getCopy(), String.valueOf(step.getErrors()), String.valueOf(proc), String.valueOf(seconds))); + } + } + } + } + + /** + * Gets a representable metric of the "processed" lines of the last step. + * + * @return the number of lines processed by the last step + */ + public long getLastProcessed() { + if (steps == null || steps.size() == 0) { + return 0L; + } + StepMetaDataCombi sid = steps.get(steps.size() - 1); + return sid.step.getProcessed(); + } + + /** + * Finds the RowSet with the specified name. + * + * @param rowsetname the rowsetname + * @return the row set, or null if none found + */ + public RowSet findRowSet(String rowsetname) { + // Start with the transformation. + for (int i = 0; i < rowsets.size(); i++) { + // log.logDetailed("DIS: looking for RowSet ["+rowsetname+"] in nr "+i+" of "+threads.size()+" threads..."); + RowSet rs = rowsets.get(i); + if (rs.getName().equalsIgnoreCase(rowsetname)) { + return rs; + } + } + + return null; + } + + /** + * Finds the RowSet between two steps (or copies of steps). + * + * @param from the name of the "from" step + * @param fromcopy the copy number of the "from" step + * @param to the name of the "to" step + * @param tocopy the copy number of the "to" step + * @return the row set, or null if none found + */ + public RowSet findRowSet(String from, int fromcopy, String to, int tocopy) { + // Start with the transformation. + for (int i = 0; i < rowsets.size(); i++) { + RowSet rs = rowsets.get(i); + if (rs.getOriginStepName().equalsIgnoreCase(from) && rs.getDestinationStepName().equalsIgnoreCase(to) && rs + .getOriginStepCopy() == fromcopy && rs.getDestinationStepCopy() == tocopy) { + return rs; + } + } + + return null; + } + + /** + * Checks whether the specified step (or step copy) has started. + * + * @param sname the step name + * @param copy the copy number + * @return true the specified step (or step copy) has started, false otherwise + */ + public boolean hasStepStarted(String sname, int copy) { + // log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!"); + // log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads"); + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + boolean started = (sid.stepname != null && sid.stepname.equalsIgnoreCase(sname)) && sid.copy == copy; + if (started) { + return true; + } + } + return false; + } + + /** + * Stops all steps from running, and alerts any registered listeners. + */ + public void stopAll() { + if (steps == null) { + return; + } + + // log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!"); + // log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads"); + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface rt = sid.step; + rt.setStopped(true); + rt.resumeRunning(); + + // Cancel queries etc. by force... 
+ StepInterface si = rt; + try { + si.stopRunning(sid.meta, sid.data); + } catch (Exception e) { + log.logError("Something went wrong while trying to stop the transformation: " + e.toString()); + log.logError(Const.getStackTracker(e)); + } + + sid.data.setStatus(StepExecutionStatus.STATUS_STOPPED); + } + + // if it is stopped it is not paused + paused.set(false); + stopped.set(true); + + // Fire the stopped listener... + // + synchronized (transStoppedListeners) { + for (TransStoppedListener listener : transStoppedListeners) { + listener.transStopped(this); + } + } + } + + /** + * Gets the number of steps in this transformation. + * + * @return the number of steps + */ + public int nrSteps() { + if (steps == null) { + return 0; + } + return steps.size(); + } + + /** + * Gets the number of active (i.e. not finished) steps in this transformation + * + * @return the number of active steps + */ + public int nrActiveSteps() { + if (steps == null) { + return 0; + } + + int nr = 0; + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + // without also considering a step status of not finished, + // the step execution results grid shows empty while + // the transformation has steps still running. + // if ( sid.step.isRunning() ) nr++; + if (sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED) { + nr++; + } + } + return nr; + } + + /** + * Checks whether the transformation steps are running lookup. + * + * @return a boolean array associated with the step list, indicating whether that step is running a lookup. + */ + public boolean[] getTransStepIsRunningLookup() { + if (steps == null) { + return null; + } + + boolean[] tResult = new boolean[steps.size()]; + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + tResult[i] = (sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED); + } + return tResult; + } + + /** + * Checks the execution status of each step in the transformations. + * + * @return an array associated with the step list, indicating the status of that step. + */ + public StepExecutionStatus[] getTransStepExecutionStatusLookup() { + if (steps == null) { + return null; + } + + // we need this snapshot for the TransGridDelegate refresh method to handle the + // difference between a timed refresh and continual step status updates + int totalSteps = steps.size(); + StepExecutionStatus[] tList = new StepExecutionStatus[totalSteps]; + for (int i = 0; i < totalSteps; i++) { + StepMetaDataCombi sid = steps.get(i); + tList[i] = sid.step.getStatus(); + } + return tList; + } + + /** + * Gets the run thread for the step at the specified index. + * + * @param i the index of the desired step + * @return a StepInterface object corresponding to the run thread for the specified step + */ + public StepInterface getRunThread(int i) { + if (steps == null) { + return null; + } + return steps.get(i).step; + } + + /** + * Gets the run thread for the step with the specified name and copy number. 
+ * + * @param name the step name + * @param copy the copy number + * @return a StepInterface object corresponding to the run thread for the specified step + */ + public StepInterface getRunThread(String name, int copy) { + if (steps == null) { + return null; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface step = sid.step; + if (step.getStepname().equalsIgnoreCase(name) && step.getCopy() == copy) { + return step; + } + } + + return null; + } + + /** + * Calculate the batch id and date range for the transformation. + * + * @throws KettleTransException if there are any errors during calculation + */ + public void calculateBatchIdAndDateRange() throws KettleTransException { + + TransLogTable transLogTable = transMeta.getTransLogTable(); + + currentDate = new Date(); + logDate = new Date(); + startDate = Const.MIN_DATE; + endDate = currentDate; + + DatabaseMeta logConnection = transLogTable.getDatabaseMeta(); + String logTable = environmentSubstitute(transLogTable.getActualTableName()); + String logSchema = environmentSubstitute(transLogTable.getActualSchemaName()); + + try { + if (logConnection != null) { + + String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination(logSchema, logTable); + if (Const.isEmpty(logTable)) { + // It doesn't make sense to start database logging without a table + // to log to. + throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.NoLogTableDefined")); + } + if (Const.isEmpty(transMeta.getName()) && logConnection != null && logTable != null) { + throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.NoTransnameAvailableForLogging")); + } + transLogTableDatabaseConnection = new Database(this, logConnection); + transLogTableDatabaseConnection.shareVariablesWith(this); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningLogConnection", "" + logConnection)); + } + transLogTableDatabaseConnection.connect(); + transLogTableDatabaseConnection.setCommit(logCommitSize); + + // See if we have to add a batch id... + // Do this first, before anything else to lock the complete table exclusively + // + if (transLogTable.isBatchIdUsed()) { + Long id_batch = + logConnection.getNextBatchId(transLogTableDatabaseConnection, logSchema, logTable, transLogTable + .getKeyField().getFieldName()); + setBatchId(id_batch.longValue()); + } + + // + // Get the date range from the logging table: from the last end_date to now. (currentDate) + // + Object[] lastr = + transLogTableDatabaseConnection.getLastLogDate(logSchemaAndTable, transMeta.getName(), false, + LogStatus.END); + if (lastr != null && lastr.length > 0) { + startDate = (Date) lastr[0]; + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.StartDateFound") + startDate); + } + } + + // + // OK, we have a date-range. + // However, perhaps we need to look at a table before we make a final judgment? 
+ // + if (transMeta.getMaxDateConnection() != null && transMeta.getMaxDateTable() != null && transMeta + .getMaxDateTable().length() > 0 && transMeta.getMaxDateField() != null && transMeta.getMaxDateField() + .length() > 0) { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LookingForMaxdateConnection", "" + transMeta + .getMaxDateConnection())); + } + DatabaseMeta maxcon = transMeta.getMaxDateConnection(); + if (maxcon != null) { + Database maxdb = new Database(this, maxcon); + maxdb.shareVariablesWith(this); + try { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.OpeningMaximumDateConnection")); + } + maxdb.connect(); + maxdb.setCommit(logCommitSize); + + // + // Determine the endDate by looking at a field in a table... + // + String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable(); + RowMetaAndData r1 = maxdb.getOneRow(sql); + if (r1 != null) { + // OK, we have a value, what's the offset? + Date maxvalue = r1.getRowMeta().getDate(r1.getData(), 0); + if (maxvalue != null) { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection") + + r1); + } + endDate.setTime((long) (maxvalue.getTime() + (transMeta.getMaxDateOffset() * 1000))); + } + } else { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection")); + } + } + } catch (KettleException e) { + throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorConnectingToDatabase", + "" + transMeta.getMaxDateConnection()), e); + } finally { + maxdb.disconnect(); + } + } else { + throw new KettleTransException(BaseMessages.getString(PKG, + "Trans.Exception.MaximumDateConnectionCouldNotBeFound", "" + transMeta.getMaxDateConnection())); + } + } + + // Determine the last date of all dependend tables... + // Get the maximum in depdate... + if (transMeta.nrDependencies() > 0) { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.CheckingForMaxDependencyDate")); + } + // + // Maybe one of the tables where this transformation is dependent on has changed? + // If so we need to change the start-date! + // + depDate = Const.MIN_DATE; + Date maxdepdate = Const.MIN_DATE; + if (lastr != null && lastr.length > 0) { + Date dep = (Date) lastr[1]; // #1: last depdate + if (dep != null) { + maxdepdate = dep; + depDate = dep; + } + } + + for (int i = 0; i < transMeta.nrDependencies(); i++) { + TransDependency td = transMeta.getDependency(i); + DatabaseMeta depcon = td.getDatabase(); + if (depcon != null) { + Database depdb = new Database(this, depcon); + try { + depdb.connect(); + depdb.setCommit(logCommitSize); + + String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename(); + RowMetaAndData r1 = depdb.getOneRow(sql); + if (r1 != null) { + // OK, we have a row, get the result! + Date maxvalue = (Date) r1.getData()[0]; + if (maxvalue != null) { + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.FoundDateFromTable", td.getTablename(), + "." 
+ td.getFieldname(), " = " + maxvalue.toString())); + } + if (maxvalue.getTime() > maxdepdate.getTime()) { + maxdepdate = maxvalue; + } + } else { + throw new KettleTransException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td + .getTablename() + ".", td.getFieldname())); + } + } else { + throw new KettleTransException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td + .getTablename() + ".", td.getFieldname())); + } + } catch (KettleException e) { + throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorInDatabase", "" + td + .getDatabase()), e); + } finally { + depdb.disconnect(); + } + } else { + throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ConnectionCouldNotBeFound", + "" + td.getDatabase())); + } + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.Maxdepdate") + (XMLHandler.date2string( + maxdepdate))); + } + } + + // OK, so we now have the maximum depdate; + // If it is larger, it means we have to read everything back in again. + // Maybe something has changed that we need! + // + if (maxdepdate.getTime() > depDate.getTime()) { + depDate = maxdepdate; + startDate = Const.MIN_DATE; + } + } else { + depDate = currentDate; + } + } + + // OK, now we have a date-range. See if we need to set a maximum! + if (transMeta.getMaxDateDifference() > 0.0 && // Do we have a difference specified? + startDate.getTime() > Const.MIN_DATE.getTime() // Is the startdate > Minimum? + ) { + // See if the end-date is larger then Start_date + DIFF? + Date maxdesired = new Date(startDate.getTime() + ((long) transMeta.getMaxDateDifference() * 1000)); + + // If this is the case: lower the end-date. Pick up the next 'region' next time around. + // We do this to limit the workload in a single update session (e.g. for large fact tables) + // + if (endDate.compareTo(maxdesired) > 0) { + endDate = maxdesired; + } + } + + } catch (KettleException e) { + throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorCalculatingDateRange", + logTable), e); + } + + // Be careful, We DO NOT close the trans log table database connection!!! + // It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions. + + } + + /** + * Begin processing. Also handle logging operations related to the start of the transformation + * + * @throws KettleTransException the kettle trans exception + */ + public void beginProcessing() throws KettleTransException { + TransLogTable transLogTable = transMeta.getTransLogTable(); + int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1); + + try { + String logTable = transLogTable.getActualTableName(); + + SimpleDateFormat df = new SimpleDateFormat(REPLAY_DATE_FORMAT); + log.logDetailed(BaseMessages.getString(PKG, "Trans.Log.TransformationCanBeReplayed") + df.format( + currentDate)); + + try { + if (transLogTableDatabaseConnection != null && !Const.isEmpty(logTable) && !Const.isEmpty(transMeta + .getName())) { + transLogTableDatabaseConnection.writeLogRecord(transLogTable, LogStatus.START, this, null); + + // Pass in a commit to release transaction locks and to allow a user to actually see the log record. 
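calculateBatchIdAndDateRange() above builds a startDate/endDate window from the last logged end date, optionally moves endDate to a MAX(field) value plus an offset, and finally caps the window when transMeta.getMaxDateDifference() is set and startDate is above the minimum date. A reduced sketch of just that capping step, with plain java.util.Date values standing in for the Kettle fields; the helper name and sample values are illustrative:

    import java.util.Date;

    public class DateWindowDemo {
        // Caps endDate so the window never exceeds maxDifferenceSeconds, mirroring the logic above.
        static Date capEndDate(Date startDate, Date endDate, double maxDifferenceSeconds) {
            if (maxDifferenceSeconds <= 0.0) {
                return endDate; // no limit configured
            }
            Date maxDesired = new Date(startDate.getTime() + (long) maxDifferenceSeconds * 1000L);
            // Lower the end date; the next "region" is picked up on the following run.
            return endDate.compareTo(maxDesired) > 0 ? maxDesired : endDate;
        }

        public static void main(String[] args) {
            Date start = new Date(0L);                        // e.g. the last logged end date
            Date end = new Date(10L * 24 * 3600 * 1000L);     // e.g. "now"
            Date capped = capEndDate(start, end, 24 * 3600);  // limit one run to a single day of data
            System.out.println("window ends at " + capped);
        }
    }

Capping the window is what limits the workload of a single incremental load; the remainder is processed on the next run, as the comment in the patch notes.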
+ // + if (!transLogTableDatabaseConnection.isAutoCommit()) { + transLogTableDatabaseConnection.commitLog(true, transLogTable); + } + + // If we need to do periodic logging, make sure to install a timer for this... + // + if (intervalInSeconds > 0) { + final Timer timer = new Timer(getName() + " - interval logging timer"); + TimerTask timerTask = new TimerTask() { + public void run() { + try { + endProcessing(); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Exception.UnableToPerformIntervalLogging"), e); + // Also stop the show... + // + errors.incrementAndGet(); + stopAll(); + } + } + }; + timer.schedule(timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000); + + addTransListener(new TransAdapter() { + public void transFinished(Trans trans) { + timer.cancel(); + } + }); + } + + // Add a listener to make sure that the last record is also written when transformation finishes... + // + addTransListener(new TransAdapter() { + public void transFinished(Trans trans) throws KettleException { + try { + endProcessing(); + + lastWrittenStepPerformanceSequenceNr = + writeStepPerformanceLogRecords(lastWrittenStepPerformanceSequenceNr, LogStatus.END); + + } catch (KettleException e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e); + } + } + }); + + } + + // If we need to write out the step logging information, do so at the end of the transformation too... + // + StepLogTable stepLogTable = transMeta.getStepLogTable(); + if (stepLogTable.isDefined()) { + addTransListener(new TransAdapter() { + public void transFinished(Trans trans) throws KettleException { + try { + writeStepLogInformation(); + } catch (KettleException e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e); + } + } + }); + } + + // If we need to write the log channel hierarchy and lineage information, add a listener for that too... + // + ChannelLogTable channelLogTable = transMeta.getChannelLogTable(); + if (channelLogTable.isDefined()) { + addTransListener(new TransAdapter() { + public void transFinished(Trans trans) throws KettleException { + try { + writeLogChannelInformation(); + } catch (KettleException e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToPerformLoggingAtTransEnd"), e); + } + } + }); + } + + // See if we need to write the step performance records at intervals too... + // + PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable(); + int perfLogInterval = Const.toInt(environmentSubstitute(performanceLogTable.getLogInterval()), -1); + if (performanceLogTable.isDefined() && perfLogInterval > 0) { + final Timer timer = new Timer(getName() + " - step performance log interval timer"); + TimerTask timerTask = new TimerTask() { + public void run() { + try { + lastWrittenStepPerformanceSequenceNr = + writeStepPerformanceLogRecords(lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, + "Trans.Exception.UnableToPerformIntervalPerformanceLogging"), e); + // Also stop the show... 
+ // + errors.incrementAndGet(); + stopAll(); + } + } + }; + timer.schedule(timerTask, perfLogInterval * 1000, perfLogInterval * 1000); + + addTransListener(new TransAdapter() { + public void transFinished(Trans trans) { + timer.cancel(); + } + }); + } + } catch (KettleException e) { + throw new KettleTransException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", + logTable), e); + } finally { + // If we use interval logging, we keep the connection open for performance reasons... + // + if (transLogTableDatabaseConnection != null && (intervalInSeconds <= 0)) { + transLogTableDatabaseConnection.disconnect(); + transLogTableDatabaseConnection = null; + } + } + } catch (KettleException e) { + throw new KettleTransException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToBeginProcessingTransformation"), e); + } + } + + /** + * Writes log channel information to a channel logging table (if one has been configured). + * + * @throws KettleException if any errors occur during logging + */ + protected void writeLogChannelInformation() throws KettleException { + Database db = null; + ChannelLogTable channelLogTable = transMeta.getChannelLogTable(); + + // PDI-7070: If parent trans or job has the same channel logging info, don't duplicate log entries + Trans t = getParentTrans(); + if (t != null) { + if (channelLogTable.equals(t.getTransMeta().getChannelLogTable())) { + return; + } + } + + Job j = getParentJob(); + + if (j != null) { + if (channelLogTable.equals(j.getJobMeta().getChannelLogTable())) { + return; + } + } + // end PDI-7070 + + try { + db = new Database(this, channelLogTable.getDatabaseMeta()); + db.shareVariablesWith(this); + db.connect(); + db.setCommit(logCommitSize); + + List loggingHierarchyList = getLoggingHierarchy(); + for (LoggingHierarchy loggingHierarchy : loggingHierarchyList) { + db.writeLogRecord(channelLogTable, LogStatus.START, loggingHierarchy, null); + } + + // Also time-out the log records in here... + // + db.cleanupLogRecords(channelLogTable); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToWriteLogChannelInformationToLogTable"), e); + } finally { + if (!db.isAutoCommit()) { + db.commit(true); + } + db.disconnect(); + } + } + + /** + * Writes step information to a step logging table (if one has been configured). 
+ * + * @throws KettleException if any errors occur during logging + */ + protected void writeStepLogInformation() throws KettleException { + Database db = null; + StepLogTable stepLogTable = getTransMeta().getStepLogTable(); + try { + db = createDataBase(stepLogTable.getDatabaseMeta()); + db.shareVariablesWith(this); + db.connect(); + db.setCommit(logCommitSize); + + for (StepMetaDataCombi combi : getSteps()) { + db.writeLogRecord(stepLogTable, LogStatus.START, combi, null); + } + + db.cleanupLogRecords(stepLogTable); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToWriteStepInformationToLogTable"), e); + } finally { + if (!db.isAutoCommit()) { + db.commit(true); + } + db.disconnect(); + } + + } + + protected Database createDataBase(DatabaseMeta meta) { + return new Database(this, meta); + } + + protected synchronized void writeMetricsInformation() throws KettleException { + // + List metricsList = + MetricsUtil.getDuration(log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START); + if (!metricsList.isEmpty()) { + System.out.println(metricsList.get(0)); + } + + metricsList = + MetricsUtil.getDuration(log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START); + if (!metricsList.isEmpty()) { + System.out.println(metricsList.get(0)); + } + + long total = 0; + metricsList = + MetricsUtil.getDuration(log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START); + if (metricsList != null) { + for (MetricsDuration duration : metricsList) { + total += duration.getDuration(); + System.out.println(" - " + duration.toString() + " Total=" + total); + } + } + + Database db = null; + MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable(); + try { + db = new Database(this, metricsLogTable.getDatabaseMeta()); + db.shareVariablesWith(this); + db.connect(); + db.setCommit(logCommitSize); + + List logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren(getLogChannelId()); + for (String logChannelId : logChannelIds) { + Deque snapshotList = + MetricsRegistry.getInstance().getSnapshotLists().get(logChannelId); + if (snapshotList != null) { + Iterator iterator = snapshotList.iterator(); + while (iterator.hasNext()) { + MetricsSnapshotInterface snapshot = iterator.next(); + db.writeLogRecord(metricsLogTable, LogStatus.START, new LoggingMetric(batchId, snapshot), null); + } + } + + Map snapshotMap = + MetricsRegistry.getInstance().getSnapshotMaps().get(logChannelId); + if (snapshotMap != null) { + synchronized (snapshotMap) { + Iterator iterator = snapshotMap.values().iterator(); + while (iterator.hasNext()) { + MetricsSnapshotInterface snapshot = iterator.next(); + db.writeLogRecord(metricsLogTable, LogStatus.START, new LoggingMetric(batchId, snapshot), null); + } + } + } + } + + // Also time-out the log records in here... + // + db.cleanupLogRecords(metricsLogTable); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.UnableToWriteMetricsInformationToLogTable"), e); + } finally { + if (!db.isAutoCommit()) { + db.commit(true); + } + db.disconnect(); + } + } + + /** + * Gets the result of the transformation. The Result object contains such measures as the number of errors, number of + * lines read/written/input/output/updated/rejected, etc. 
+ * + * @return the Result object containing resulting measures from execution of the transformation + */ + public Result getResult() { + if (steps == null) { + return null; + } + + Result result = new Result(); + result.setNrErrors(errors.longValue()); + result.setResult(errors.longValue() == 0); + TransLogTable transLogTable = transMeta.getTransLogTable(); + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface step = sid.step; + + result.setNrErrors(result.getNrErrors() + sid.step.getErrors()); + result.getResultFiles().putAll(step.getResultFiles()); + + if (step.getStepname().equals(transLogTable.getSubjectString(TransLogTable.ID.LINES_READ))) { + result.setNrLinesRead(result.getNrLinesRead() + step.getLinesRead()); + } + if (step.getStepname().equals(transLogTable.getSubjectString(TransLogTable.ID.LINES_INPUT))) { + result.setNrLinesInput(result.getNrLinesInput() + step.getLinesInput()); + } + if (step.getStepname().equals(transLogTable.getSubjectString(TransLogTable.ID.LINES_WRITTEN))) { + result.setNrLinesWritten(result.getNrLinesWritten() + step.getLinesWritten()); + } + if (step.getStepname().equals(transLogTable.getSubjectString(TransLogTable.ID.LINES_OUTPUT))) { + result.setNrLinesOutput(result.getNrLinesOutput() + step.getLinesOutput()); + } + if (step.getStepname().equals(transLogTable.getSubjectString(TransLogTable.ID.LINES_UPDATED))) { + result.setNrLinesUpdated(result.getNrLinesUpdated() + step.getLinesUpdated()); + } + if (step.getStepname().equals(transLogTable.getSubjectString(TransLogTable.ID.LINES_REJECTED))) { + result.setNrLinesRejected(result.getNrLinesRejected() + step.getLinesRejected()); + } + } + + result.setRows(resultRows); + if (!Const.isEmpty(resultFiles)) { + result.setResultFiles(new HashMap()); + for (ResultFile resultFile : resultFiles) { + result.getResultFiles().put(resultFile.toString(), resultFile); + } + } + result.setStopped(isStopped()); + result.setLogChannelId(log.getLogChannelId()); + + return result; + } + + /** + * End processing. Also handle any logging operations associated with the end of a transformation + * + * @return true if all end processing is successful, false otherwise + * @throws KettleException if any errors occur during processing + */ + private synchronized boolean endProcessing() throws KettleException { + LogStatus status; + + if (isFinished()) { + if (isStopped()) { + status = LogStatus.STOP; + } else { + status = LogStatus.END; + } + } else if (isPaused()) { + status = LogStatus.PAUSED; } else { - log.logError( BaseMessages.getString( PKG, "Trans.Log.ProcessErrorInfo2", step.getStepname(), "." + step - .getCopy(), String.valueOf( step.getErrors() ), String.valueOf( proc ), String.valueOf( seconds ) ) ); - } - } - } - } - - /** - * Gets a representable metric of the "processed" lines of the last step. - * - * @return the number of lines processed by the last step - */ - public long getLastProcessed() { - if ( steps == null || steps.size() == 0 ) { - return 0L; - } - StepMetaDataCombi sid = steps.get( steps.size() - 1 ); - return sid.step.getProcessed(); - } - - /** - * Finds the RowSet with the specified name. - * - * @param rowsetname - * the rowsetname - * @return the row set, or null if none found - */ - public RowSet findRowSet( String rowsetname ) { - // Start with the transformation. 
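getResult() above adds up the per-step error counts and only credits a step's line counters to the Result when the step name matches the subject configured for that metric in the transformation log table. A reduced sketch of that aggregation; StepCounters, the step names and the single "lines written" metric are illustrative simplifications, not the Kettle API:

    import java.util.Arrays;
    import java.util.List;

    public class ResultAggregationDemo {
        static class StepCounters {
            final String name;
            final long errors;
            final long linesWritten;

            StepCounters(String name, long errors, long linesWritten) {
                this.name = name;
                this.errors = errors;
                this.linesWritten = linesWritten;
            }
        }

        public static void main(String[] args) {
            List<StepCounters> steps = Arrays.asList(
                new StepCounters("Table input", 0, 1000),
                new StepCounters("Table output", 2, 998));

            // Only the step named in the log table definition contributes to the "written" metric.
            String linesWrittenSubject = "Table output";

            long totalErrors = 0;
            long linesWritten = 0;
            for (StepCounters step : steps) {
                totalErrors += step.errors;
                if (step.name.equals(linesWrittenSubject)) {
                    linesWritten += step.linesWritten;
                }
            }
            System.out.println("errors=" + totalErrors + ", written=" + linesWritten
                + ", success=" + (totalErrors == 0));
        }
    }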
- for ( int i = 0; i < rowsets.size(); i++ ) { - // log.logDetailed("DIS: looking for RowSet ["+rowsetname+"] in nr "+i+" of "+threads.size()+" threads..."); - RowSet rs = rowsets.get( i ); - if ( rs.getName().equalsIgnoreCase( rowsetname ) ) { - return rs; - } - } - - return null; - } - - /** - * Finds the RowSet between two steps (or copies of steps). - * - * @param from - * the name of the "from" step - * @param fromcopy - * the copy number of the "from" step - * @param to - * the name of the "to" step - * @param tocopy - * the copy number of the "to" step - * @return the row set, or null if none found - */ - public RowSet findRowSet( String from, int fromcopy, String to, int tocopy ) { - // Start with the transformation. - for ( int i = 0; i < rowsets.size(); i++ ) { - RowSet rs = rowsets.get( i ); - if ( rs.getOriginStepName().equalsIgnoreCase( from ) && rs.getDestinationStepName().equalsIgnoreCase( to ) && rs - .getOriginStepCopy() == fromcopy && rs.getDestinationStepCopy() == tocopy ) { - return rs; - } - } - - return null; - } - - /** - * Checks whether the specified step (or step copy) has started. - * - * @param sname - * the step name - * @param copy - * the copy number - * @return true the specified step (or step copy) has started, false otherwise - */ - public boolean hasStepStarted( String sname, int copy ) { - // log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!"); - // log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads"); - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - boolean started = ( sid.stepname != null && sid.stepname.equalsIgnoreCase( sname ) ) && sid.copy == copy; - if ( started ) { + status = LogStatus.RUNNING; + } + + TransLogTable transLogTable = transMeta.getTransLogTable(); + int intervalInSeconds = Const.toInt(environmentSubstitute(transLogTable.getLogInterval()), -1); + + logDate = new Date(); + + // OK, we have some logging to do... + // + DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta(); + String logTable = transMeta.getTransLogTable().getActualTableName(); + if (logcon != null) { + Database ldb = null; + + try { + // Let's not reconnect/disconnect all the time for performance reasons! + // + if (transLogTableDatabaseConnection == null) { + ldb = new Database(this, logcon); + ldb.shareVariablesWith(this); + ldb.connect(); + ldb.setCommit(logCommitSize); + transLogTableDatabaseConnection = ldb; + } else { + ldb = transLogTableDatabaseConnection; + } + + // Write to the standard transformation log table... + // + if (!Const.isEmpty(logTable)) { + ldb.writeLogRecord(transLogTable, status, this, null); + } + + // Also time-out the log records in here... 
+ // + if (status.equals(LogStatus.END) || status.equals(LogStatus.STOP)) { + ldb.cleanupLogRecords(transLogTable); + } + + // Commit the operations to prevent locking issues + // + if (!ldb.isAutoCommit()) { + ldb.commitLog(true, transMeta.getTransLogTable()); + } + } catch (KettleDatabaseException e) { + // PDI-9790 error write to log db is transaction error + log.logError(BaseMessages.getString(PKG, "Database.Error.WriteLogTable", logTable), e); + errors.incrementAndGet(); + // end PDI-9790 + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, "Trans.Exception.ErrorWritingLogRecordToTable", + transMeta.getTransLogTable().getActualTableName()), e); + } finally { + if (intervalInSeconds <= 0 || (status.equals(LogStatus.END) || status.equals(LogStatus.STOP))) { + ldb.disconnect(); + transLogTableDatabaseConnection = null; // disconnected + } + } + } return true; - } - } - return false; - } - - /** - * Stops all steps from running, and alerts any registered listeners. - */ - public void stopAll() { - if ( steps == null ) { - return; - } - - // log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!"); - // log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads"); - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface rt = sid.step; - rt.setStopped( true ); - rt.resumeRunning(); - - // Cancel queries etc. by force... - StepInterface si = rt; - try { - si.stopRunning( sid.meta, sid.data ); - } catch ( Exception e ) { - log.logError( "Something went wrong while trying to stop the transformation: " + e.toString() ); - log.logError( Const.getStackTracker( e ) ); - } - - sid.data.setStatus( StepExecutionStatus.STATUS_STOPPED ); - } - - // if it is stopped it is not paused - paused.set( false ); - stopped.set( true ); - - // Fire the stopped listener... - // - synchronized ( transStoppedListeners ) { - for ( TransStoppedListener listener : transStoppedListeners ) { - listener.transStopped( this ); - } - } - } - - /** - * Gets the number of steps in this transformation. - * - * @return the number of steps - */ - public int nrSteps() { - if ( steps == null ) { - return 0; - } - return steps.size(); - } - - /** - * Gets the number of active (i.e. not finished) steps in this transformation - * - * @return the number of active steps - */ - public int nrActiveSteps() { - if ( steps == null ) { - return 0; - } - - int nr = 0; - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - // without also considering a step status of not finished, - // the step execution results grid shows empty while - // the transformation has steps still running. - // if ( sid.step.isRunning() ) nr++; - if ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ) { - nr++; - } - } - return nr; - } - - /** - * Checks whether the transformation steps are running lookup. - * - * @return a boolean array associated with the step list, indicating whether that step is running a lookup. - */ - public boolean[] getTransStepIsRunningLookup() { - if ( steps == null ) { - return null; - } - - boolean[] tResult = new boolean[steps.size()]; - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - tResult[i] = ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ); - } - return tResult; - } - - /** - * Checks the execution status of each step in the transformations. 
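// Sketch related to endProcessing() above: a START/END/STOP record is only written when a
// transformation log table is configured on the TransMeta. A caller can verify that before
// relying on database logging; "transMeta" and "log" are assumed to be available.
TransLogTable transLogTable = transMeta.getTransLogTable();
if (transLogTable.getDatabaseMeta() == null || Const.isEmpty(transLogTable.getActualTableName())) {
  log.logBasic("No transformation log table configured; endProcessing() skips database logging");
}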
- * - * @return an array associated with the step list, indicating the status of that step. - */ - public StepExecutionStatus[] getTransStepExecutionStatusLookup() { - if ( steps == null ) { - return null; - } - - // we need this snapshot for the TransGridDelegate refresh method to handle the - // difference between a timed refresh and continual step status updates - int totalSteps = steps.size(); - StepExecutionStatus[] tList = new StepExecutionStatus[totalSteps]; - for ( int i = 0; i < totalSteps; i++ ) { - StepMetaDataCombi sid = steps.get( i ); - tList[i] = sid.step.getStatus(); - } - return tList; - } - - /** - * Gets the run thread for the step at the specified index. - * - * @param i - * the index of the desired step - * @return a StepInterface object corresponding to the run thread for the specified step - */ - public StepInterface getRunThread( int i ) { - if ( steps == null ) { - return null; - } - return steps.get( i ).step; - } - - /** - * Gets the run thread for the step with the specified name and copy number. - * - * @param name - * the step name - * @param copy - * the copy number - * @return a StepInterface object corresponding to the run thread for the specified step - */ - public StepInterface getRunThread( String name, int copy ) { - if ( steps == null ) { - return null; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface step = sid.step; - if ( step.getStepname().equalsIgnoreCase( name ) && step.getCopy() == copy ) { - return step; - } - } - - return null; - } - - /** - * Calculate the batch id and date range for the transformation. - * - * @throws KettleTransException - * if there are any errors during calculation - */ - public void calculateBatchIdAndDateRange() throws KettleTransException { - - TransLogTable transLogTable = transMeta.getTransLogTable(); - - currentDate = new Date(); - logDate = new Date(); - startDate = Const.MIN_DATE; - endDate = currentDate; - - DatabaseMeta logConnection = transLogTable.getDatabaseMeta(); - String logTable = environmentSubstitute( transLogTable.getActualTableName() ); - String logSchema = environmentSubstitute( transLogTable.getActualSchemaName() ); - - try { - if ( logConnection != null ) { - - String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination( logSchema, logTable ); - if ( Const.isEmpty( logTable ) ) { - // It doesn't make sense to start database logging without a table - // to log to. - throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.NoLogTableDefined" ) ); - } - if ( Const.isEmpty( transMeta.getName() ) && logConnection != null && logTable != null ) { - throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.NoTransnameAvailableForLogging" ) ); - } - transLogTableDatabaseConnection = new Database( this, logConnection ); - transLogTableDatabaseConnection.shareVariablesWith( this ); - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningLogConnection", "" + logConnection ) ); - } - transLogTableDatabaseConnection.connect(); - transLogTableDatabaseConnection.setCommit( logCommitSize ); - - // See if we have to add a batch id... - // Do this first, before anything else to lock the complete table exclusively + } + + /** + * Write step performance log records. + * + * @param startSequenceNr the start sequence numberr + * @param status the logging status. 
If this is End, perform cleanup + * @return the new sequence number + * @throws KettleException if any errors occur during logging + */ + private int writeStepPerformanceLogRecords(int startSequenceNr, LogStatus status) throws KettleException { + int lastSeqNr = 0; + Database ldb = null; + PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable(); + + if (!performanceLogTable.isDefined() || !transMeta.isCapturingStepPerformanceSnapShots() + || stepPerformanceSnapShots == null || stepPerformanceSnapShots.isEmpty()) { + return 0; // nothing to do here! + } + + try { + ldb = new Database(this, performanceLogTable.getDatabaseMeta()); + ldb.shareVariablesWith(this); + ldb.connect(); + ldb.setCommit(logCommitSize); + + // Write to the step performance log table... + // + RowMetaInterface rowMeta = performanceLogTable.getLogRecord(LogStatus.START, null, null).getRowMeta(); + ldb.prepareInsert(rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable.getActualTableName()); + + synchronized (stepPerformanceSnapShots) { + Iterator> iterator = stepPerformanceSnapShots.values().iterator(); + while (iterator.hasNext()) { + List snapshots = iterator.next(); + synchronized (snapshots) { + Iterator snapshotsIterator = snapshots.iterator(); + while (snapshotsIterator.hasNext()) { + StepPerformanceSnapShot snapshot = snapshotsIterator.next(); + if (snapshot.getSeqNr() >= startSequenceNr && snapshot + .getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded) { + + RowMetaAndData row = performanceLogTable.getLogRecord(LogStatus.START, snapshot, null); + + ldb.setValuesInsert(row.getRowMeta(), row.getData()); + ldb.insertRow(true); + } + lastSeqNr = snapshot.getSeqNr(); + } + } + } + } + + ldb.insertFinished(true); + + // Finally, see if the log table needs cleaning up... + // + if (status.equals(LogStatus.END)) { + ldb.cleanupLogRecords(performanceLogTable); + } + + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, + "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable"), e); + } finally { + if (ldb != null) { + ldb.disconnect(); + } + } + + return lastSeqNr + 1; + } + + /** + * Close unique database connections. If there are errors in the Result, perform a rollback + * + * @param result the result of the transformation execution + */ + private void closeUniqueDatabaseConnections(Result result) { + + // Don't close any connections if the parent job is using the same transaction + // + if (parentJob != null && transactionId != null && parentJob.getTransactionId() != null && transactionId.equals( + parentJob.getTransactionId())) { + return; + } + + // Don't close any connections if the parent transformation is using the same transaction + // + if (parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() && transactionId != null + && parentTrans.getTransactionId() != null && transactionId.equals(parentTrans.getTransactionId())) { + return; + } + + // First we get all the database connections ... 
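// Sketch for writeStepPerformanceLogRecords(...) above: rows are only written when snapshot
// capturing is enabled and a performance log table is defined. The setter below is assumed
// from the isCapturingStepPerformanceSnapShots() check used in that method.
transMeta.setCapturingStepPerformanceSnapShots(true);
PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
if (!performanceLogTable.isDefined()) {
  log.logBasic("Define a performance log table on the transformation to persist the snapshots");
}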
// - if ( transLogTable.isBatchIdUsed() ) { - Long id_batch = - logConnection.getNextBatchId( transLogTableDatabaseConnection, logSchema, logTable, transLogTable - .getKeyField().getFieldName() ); - setBatchId( id_batch.longValue() ); + DatabaseConnectionMap map = DatabaseConnectionMap.getInstance(); + synchronized (map) { + List databaseList = new ArrayList(map.getMap().values()); + for (Database database : databaseList) { + if (database.getConnectionGroup().equals(getTransactionId())) { + try { + // This database connection belongs to this transformation. + // Let's roll it back if there is an error... + // + if (result.getNrErrors() > 0) { + try { + database.rollback(true); + log.logBasic(BaseMessages.getString(PKG, "Trans.Exception.TransactionsRolledBackOnConnection", + database.toString())); + } catch (Exception e) { + throw new KettleDatabaseException(BaseMessages.getString(PKG, + "Trans.Exception.ErrorRollingBackUniqueConnection", database.toString()), e); + } + } else { + try { + database.commit(true); + log.logBasic(BaseMessages.getString(PKG, "Trans.Exception.TransactionsCommittedOnConnection", database + .toString())); + } catch (Exception e) { + throw new KettleDatabaseException(BaseMessages.getString(PKG, + "Trans.Exception.ErrorCommittingUniqueConnection", database.toString()), e); + } + } + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", + database.toString()), e); + result.setNrErrors(result.getNrErrors() + 1); + } finally { + try { + // This database connection belongs to this transformation. + database.closeConnectionOnly(); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", + database.toString()), e); + result.setNrErrors(result.getNrErrors() + 1); + } finally { + // Remove the database from the list... + // + map.removeConnection(database.getConnectionGroup(), database.getPartitionId(), database); + } + } + } + } + + // Who else needs to be informed of the rollback or commit? + // + List transactionListeners = map.getTransactionListeners(getTransactionId()); + if (result.getNrErrors() > 0) { + for (DatabaseTransactionListener listener : transactionListeners) { + try { + listener.rollback(); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Exception.ErrorHandlingTransactionListenerRollback"), + e); + result.setNrErrors(result.getNrErrors() + 1); + } + } + } else { + for (DatabaseTransactionListener listener : transactionListeners) { + try { + listener.commit(); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Trans.Exception.ErrorHandlingTransactionListenerCommit"), e); + result.setNrErrors(result.getNrErrors() + 1); + } + } + } + + } + } + + /** + * Find the run thread for the step with the specified name. + * + * @param stepname the step name + * @return a StepInterface object corresponding to the run thread for the specified step + */ + public StepInterface findRunThread(String stepname) { + if (steps == null) { + return null; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface step = sid.step; + if (step.getStepname().equalsIgnoreCase(stepname)) { + return step; + } + } + return null; + } + + /** + * Find the base steps for the step with the specified name. 
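// Sketch for closeUniqueDatabaseConnections(...) above: its commit/rollback handling only
// applies when the transformation runs with "unique connections", i.e. a single shared
// transaction. The setter below is assumed from the isUsingUniqueConnections() check above.
transMeta.setUsingUniqueConnections(true);
// With this enabled, a run that ends with errors is rolled back on the shared connection
// group instead of being committed.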
+ * + * @param stepname the step name + * @return the list of base steps for the specified step + */ + public List findBaseSteps(String stepname) { + List baseSteps = new ArrayList(); + + if (steps == null) { + return baseSteps; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface stepInterface = sid.step; + if (stepInterface.getStepname().equalsIgnoreCase(stepname)) { + baseSteps.add(stepInterface); + } + } + return baseSteps; + } + + /** + * Find the executing step copy for the step with the specified name and copy number + * + * @param stepname the step name + * @param copynr + * @return the executing step found or null if no copy could be found. + */ + public StepInterface findStepInterface(String stepname, int copyNr) { + if (steps == null) { + return null; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface stepInterface = sid.step; + if (stepInterface.getStepname().equalsIgnoreCase(stepname) && sid.copy == copyNr) { + return stepInterface; + } + } + return null; + } + + /** + * Find the available executing step copies for the step with the specified name + * + * @param stepname the step name + * @param copynr + * @return the list of executing step copies found or null if no steps are available yet (incorrect usage) + */ + public List findStepInterfaces(String stepname) { + if (steps == null) { + return null; + } + + List list = new ArrayList(); + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface stepInterface = sid.step; + if (stepInterface.getStepname().equalsIgnoreCase(stepname)) { + list.add(stepInterface); + } + } + return list; + } + + /** + * Find the data interface for the step with the specified name. + * + * @param name the step name + * @return the step data interface + */ + public StepDataInterface findDataInterface(String name) { + if (steps == null) { + return null; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + StepInterface rt = sid.step; + if (rt.getStepname().equalsIgnoreCase(name)) { + return sid.data; + } + } + return null; + } + + /** + * Gets the start date/time object for the transformation. + * + * @return Returns the startDate. + */ + public Date getStartDate() { + return startDate; + } + + /** + * Gets the end date/time object for the transformation. + * + * @return Returns the endDate. + */ + public Date getEndDate() { + return endDate; + } + + /** + * Checks whether the running transformation is being monitored. + * + * @return true the running transformation is being monitored, false otherwise + */ + public boolean isMonitored() { + return monitored; + } + + /** + * Sets whether the running transformation should be monitored. + * + * @param monitored true if the running transformation should be monitored, false otherwise + */ + public void setMonitored(boolean monitored) { + this.monitored = monitored; + } + + /** + * Gets the meta-data for the transformation. + * + * @return Returns the transformation meta-data + */ + public TransMeta getTransMeta() { + return transMeta; + } + + /** + * Sets the meta-data for the transformation. + * + * @param transMeta The transformation meta-data to set. + */ + public void setTransMeta(TransMeta transMeta) { + this.transMeta = transMeta; + } + + /** + * Gets the current date/time object. 
+ * + * @return the current date + */ + public Date getCurrentDate() { + return currentDate; + } + + /** + * Gets the dependency date for the transformation. A transformation can have a list of dependency fields. If any of + * these fields have a maximum date higher than the dependency date of the last run, the date range is set to to (-oo, + * now). The use-case is the incremental population of Slowly Changing Dimensions (SCD). + * + * @return Returns the dependency date + */ + public Date getDepDate() { + return depDate; + } + + /** + * Gets the date the transformation was logged. + * + * @return the log date + */ + public Date getLogDate() { + return logDate; + } + + /** + * Gets the rowsets for the transformation. + * + * @return a list of rowsets + */ + public List getRowsets() { + return rowsets; + } + + /** + * Gets a list of steps in the transformation. + * + * @return a list of the steps in the transformation + */ + public List getSteps() { + return steps; + } + + /** + * Gets a string representation of the transformation. + * + * @return the string representation of the transformation + * @see java.lang.Object#toString() + */ + public String toString() { + if (transMeta == null || transMeta.getName() == null) { + return getClass().getSimpleName(); + } + + // See if there is a parent transformation. If so, print the name of the parent here as well... + // + StringBuffer string = new StringBuffer(); + + // If we're running as a mapping, we get a reference to the calling (parent) transformation as well... + // + if (getParentTrans() != null) { + string.append('[').append(getParentTrans().toString()).append(']').append('.'); + } + + // When we run a mapping we also set a mapping step name in there... + // + if (!Const.isEmpty(mappingStepName)) { + string.append('[').append(mappingStepName).append(']').append('.'); + } + + string.append(transMeta.getName()); + + return string.toString(); + } + + /** + * Gets the mapping inputs for each step in the transformation. + * + * @return an array of MappingInputs + */ + public MappingInput[] findMappingInput() { + if (steps == null) { + return null; + } + + List list = new ArrayList(); + + // Look in threads and find the MappingInput step thread... + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi smdc = steps.get(i); + StepInterface step = smdc.step; + if (step.getStepID().equalsIgnoreCase("MappingInput")) { + list.add((MappingInput) step); + } + } + return list.toArray(new MappingInput[list.size()]); + } + + /** + * Gets the mapping outputs for each step in the transformation. + * + * @return an array of MappingOutputs + */ + public MappingOutput[] findMappingOutput() { + List list = new ArrayList(); + + if (steps != null) { + // Look in threads and find the MappingInput step thread... + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi smdc = steps.get(i); + StepInterface step = smdc.step; + if (step.getStepID().equalsIgnoreCase("MappingOutput")) { + list.add((MappingOutput) step); + } + } + } + return list.toArray(new MappingOutput[list.size()]); + } + + /** + * Find the StepInterface (thread) by looking it up using the name. + * + * @param stepname The name of the step to look for + * @param copy the copy number of the step to look for + * @return the StepInterface or null if nothing was found. + */ + public StepInterface getStepInterface(String stepname, int copy) { + if (steps == null) { + return null; + } + + // Now start all the threads... 
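// Usage sketch for the step lookup helpers above (findRunThread / getStepInterface):
// monitoring code can read live counters from a running step. "trans" is assumed to be a
// running Trans instance and "Table output" is only an example step name.
StepInterface outputStep = trans.findRunThread("Table output");
if (outputStep != null) {
  log.logBasic(outputStep.getStepname() + " wrote " + outputStep.getLinesWritten()
      + " line(s) so far, errors=" + outputStep.getErrors());
}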
+ for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + if (sid.stepname.equalsIgnoreCase(stepname) && sid.copy == copy) { + return sid.step; + } + } + + return null; + } + + /** + * Gets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run + * again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line + * numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are + * passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors + * (bad dates, chars in numbers, etc), you simply send the document back to the source (the user/departement that + * created it probably) and when you get it back, re-run the last transformation. + * + * @return the replay date + */ + public Date getReplayDate() { + return replayDate; + } + + /** + * Sets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run + * again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line + * numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are + * passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors + * (bad dates, chars in numbers, etc), you simply send the document back to the source (the user/departement that + * created it probably) and when you get it back, re-run the last transformation. + * + * @param replayDate the new replay date + */ + public void setReplayDate(Date replayDate) { + this.replayDate = replayDate; + } + + /** + * Turn on safe mode during running: the transformation will run slower but with more checking enabled. + * + * @param safeModeEnabled true for safe mode + */ + public void setSafeModeEnabled(boolean safeModeEnabled) { + this.safeModeEnabled = safeModeEnabled; + } + + /** + * Checks whether safe mode is enabled. + * + * @return Returns true if the safe mode is enabled: the transformation will run slower but with more checking enabled + */ + public boolean isSafeModeEnabled() { + return safeModeEnabled; + } + + /** + * This adds a row producer to the transformation that just got set up. It is preferable to run this BEFORE execute() + * but after prepareExecution() + * + * @param stepname The step to produce rows for + * @param copynr The copynr of the step to produce row for (normally 0 unless you have multiple copies running) + * @return the row producer + * @throws KettleException in case the thread/step to produce rows for could not be found. + * @see Trans#execute(String[]) + * @see Trans#prepareExecution(String[]) + */ + public RowProducer addRowProducer(String stepname, int copynr) throws KettleException { + StepInterface stepInterface = getStepInterface(stepname, copynr); + if (stepInterface == null) { + throw new KettleException("Unable to find thread with name " + stepname + " and copy number " + copynr); + } + + // We are going to add an extra RowSet to this stepInterface. 
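// Usage sketch for addRowProducer(...): inject rows into a transformation that was prepared
// but not yet started, as the javadoc above describes. "transMeta" and the injector step
// name "Injector" are assumptions; RowProducer.putRow()/finished() are standard Kettle calls.
Trans trans = new Trans(transMeta);
trans.prepareExecution(null);
RowProducer producer = trans.addRowProducer("Injector", 0);
trans.startThreads();

RowMetaInterface injectRowMeta = new RowMeta();
injectRowMeta.addValueMeta(new ValueMetaString("name"));
producer.putRow(injectRowMeta, new Object[]{"first row"});
producer.finished();                  // signal end-of-data so the injector step can finish

trans.waitUntilFinished();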
+ RowSet rowSet; + switch (transMeta.getTransformationType()) { + case Normal: + rowSet = new BlockingRowSet(transMeta.getSizeRowset()); + break; + case SerialSingleThreaded: + rowSet = new SingleRowRowSet(); + break; + case SingleThreaded: + rowSet = new QueueRowSet(); + break; + default: + throw new KettleException("Unhandled transformation type: " + transMeta.getTransformationType()); + } + + // Add this rowset to the list of active rowsets for the selected step + stepInterface.getInputRowSets().add(rowSet); + + return new RowProducer(stepInterface, rowSet); + } + + /** + * Gets the parent job, or null if there is no parent. + * + * @return the parent job, or null if there is no parent + */ + public Job getParentJob() { + return parentJob; + } + + /** + * Sets the parent job for the transformation. + * + * @param parentJob The parent job to set + */ + public void setParentJob(Job parentJob) { + this.logLevel = parentJob.getLogLevel(); + this.log.setLogLevel(logLevel); + this.parentJob = parentJob; + + transactionId = calculateTransactionId(); + } + + /** + * Finds the StepDataInterface (currently) associated with the specified step. + * + * @param stepname The name of the step to look for + * @param stepcopy The copy number (0 based) of the step + * @return The StepDataInterface or null if non found. + */ + public StepDataInterface getStepDataInterface(String stepname, int stepcopy) { + if (steps == null) { + return null; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + if (sid.stepname.equals(stepname) && sid.copy == stepcopy) { + return sid.data; + } + } + return null; + } + + /** + * Checks whether the transformation has any steps that are halted. + * + * @return true if one or more steps are halted, false otherwise + */ + public boolean hasHaltedSteps() { + // not yet 100% sure of this, if there are no steps... or none halted? + if (steps == null) { + return false; + } + + for (int i = 0; i < steps.size(); i++) { + StepMetaDataCombi sid = steps.get(i); + if (sid.data.getStatus() == StepExecutionStatus.STATUS_HALTED) { + return true; + } + } + return false; + } + + /** + * Gets the job start date. + * + * @return the job start date + */ + public Date getJobStartDate() { + return jobStartDate; + } + + /** + * Gets the job end date. + * + * @return the job end date + */ + public Date getJobEndDate() { + return jobEndDate; + } + + /** + * Sets the job end date. + * + * @param jobEndDate the jobEndDate to set + */ + public void setJobEndDate(Date jobEndDate) { + this.jobEndDate = jobEndDate; + } + + /** + * Sets the job start date. + * + * @param jobStartDate the jobStartDate to set + */ + public void setJobStartDate(Date jobStartDate) { + this.jobStartDate = jobStartDate; + } + + /** + * Get the batch ID that is passed from the parent job to the transformation. If nothing is passed, it's the + * transformation's batch ID + * + * @return the parent job's batch ID, or the transformation's batch ID if there is no parent job + */ + public long getPassedBatchId() { + return passedBatchId; + } + + /** + * Sets the passed batch ID of the transformation from the batch ID of the parent job. + * + * @param jobBatchId the jobBatchId to set + */ + public void setPassedBatchId(long jobBatchId) { + this.passedBatchId = jobBatchId; + } + + /** + * Gets the batch ID of the transformation. + * + * @return the batch ID of the transformation + */ + public long getBatchId() { + return batchId; + } + + /** + * Sets the batch ID of the transformation. 
+ * + * @param batchId the batch ID to set + */ + public void setBatchId(long batchId) { + this.batchId = batchId; + } + + /** + * Gets the name of the thread that contains the transformation. + * + * @return the thread name + * @deprecated please use getTransactionId() instead + */ + @Deprecated + public String getThreadName() { + return threadName; + } + + /** + * Sets the thread name for the transformation. + * + * @param threadName the thread name + * @deprecated please use setTransactionId() instead + */ + @Deprecated + public void setThreadName(String threadName) { + this.threadName = threadName; + } + + /** + * Gets the status of the transformation (Halting, Finished, Paused, etc.) + * + * @return the status of the transformation + */ + public String getStatus() { + String message; + + if (running) { + if (isStopped()) { + message = STRING_HALTING; + } else { + if (isFinished()) { + message = STRING_FINISHED; + if (getResult().getNrErrors() > 0) { + message += " (with errors)"; + } + } else if (isPaused()) { + message = STRING_PAUSED; + } else { + message = STRING_RUNNING; + } + } + } else if (isStopped()) { + message = STRING_STOPPED; + } else if (preparing) { + message = STRING_PREPARING; + } else if (initializing) { + message = STRING_INITIALIZING; + } else { + message = STRING_WAITING; + } + + return message; + } + + /** + * Checks whether the transformation is initializing. + * + * @return true if the transformation is initializing, false otherwise + */ + public boolean isInitializing() { + return initializing; + } + + /** + * Sets whether the transformation is initializing. + * + * @param initializing true if the transformation is initializing, false otherwise + */ + public void setInitializing(boolean initializing) { + this.initializing = initializing; + } + + /** + * Checks whether the transformation is preparing for execution. + * + * @return true if the transformation is preparing for execution, false otherwise + */ + public boolean isPreparing() { + return preparing; + } + + /** + * Sets whether the transformation is preparing for execution. + * + * @param preparing true if the transformation is preparing for execution, false otherwise + */ + public void setPreparing(boolean preparing) { + this.preparing = preparing; + } + + /** + * Checks whether the transformation is running. + * + * @return true if the transformation is running, false otherwise + */ + public boolean isRunning() { + return running; + } + + /** + * Sets whether the transformation is running. + * + * @param running true if the transformation is running, false otherwise + */ + public void setRunning(boolean running) { + this.running = running; + } + + /** + * Execute the transformation in a clustered fashion. 
The transformation steps are split and collected in a + * TransSplitter object + * + * @param transMeta the transformation's meta-data + * @param executionConfiguration the execution configuration + * @return the transformation splitter object + * @throws KettleException the kettle exception + */ + public static final TransSplitter executeClustered(final TransMeta transMeta, + final TransExecutionConfiguration executionConfiguration) throws KettleException { + if (Const.isEmpty(transMeta.getName())) { + throw new KettleException("The transformation needs a name to uniquely identify it by on the remote server."); + } + + TransSplitter transSplitter = new TransSplitter(transMeta); + transSplitter.splitOriginalTransformation(); + + // Pass the clustered run ID to allow for parallel execution of clustered transformations + // + executionConfiguration.getVariables().put(Const.INTERNAL_VARIABLE_CLUSTER_RUN_ID, transSplitter + .getClusteredRunId()); + + executeClustered(transSplitter, executionConfiguration); + return transSplitter; + } + + /** + * Executes an existing TransSplitter, with the transformation already split. + * + * @param transSplitter the trans splitter + * @param executionConfiguration the execution configuration + * @throws KettleException the kettle exception + * @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate + */ + public static final void executeClustered(final TransSplitter transSplitter, + final TransExecutionConfiguration executionConfiguration) throws KettleException { + try { + // Send the transformations to the servers... + // + // First the master and the slaves... + // + TransMeta master = transSplitter.getMaster(); + final SlaveServer[] slaves = transSplitter.getSlaveTargets(); + final Thread[] threads = new Thread[slaves.length]; + final Throwable[] errors = new Throwable[slaves.length]; + + // Keep track of the various Carte object IDs + // + final Map carteObjectMap = transSplitter.getCarteObjectMap(); + + // + // Send them all on their way... + // + SlaveServer masterServer = null; + List masterSteps = master.getTransHopSteps(false); + if (masterSteps.size() > 0) { // If there is something that needs to be done on the master... + masterServer = transSplitter.getMasterServer(); + if (executionConfiguration.isClusterPosting()) { + TransConfiguration transConfiguration = new TransConfiguration(master, executionConfiguration); + Map variables = transConfiguration.getTransExecutionConfiguration().getVariables(); + variables.put(Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString(slaves.length)); + variables.put(Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y"); + + // Parameters override the variables but they need to pass over the configuration too... + // + Map params = transConfiguration.getTransExecutionConfiguration().getParams(); + TransMeta ot = transSplitter.getOriginalTransformation(); + for (String param : ot.listParameters()) { + String value = + Const.NVL(ot.getParameterValue(param), Const.NVL(ot.getParameterDefault(param), ot.getVariable( + param))); + params.put(param, value); + } + + String masterReply = + masterServer.sendXML(transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y"); + WebResult webResult = WebResult.fromXMLString(masterReply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("An error occurred sending the master transformation: " + webResult + .getMessage()); + } + carteObjectMap.put(master, webResult.getId()); + } + } + + // Then the slaves... 
+ // These are started in a background thread. + // + for (int i = 0; i < slaves.length; i++) { + final int index = i; + + final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get(slaves[i]); + + if (executionConfiguration.isClusterPosting()) { + Runnable runnable = new Runnable() { + public void run() { + try { + // Create a copy for local use... We get race-conditions otherwise... + // + TransExecutionConfiguration slaveTransExecutionConfiguration = + (TransExecutionConfiguration) executionConfiguration.clone(); + TransConfiguration transConfiguration = + new TransConfiguration(slaveTrans, slaveTransExecutionConfiguration); + + Map variables = slaveTransExecutionConfiguration.getVariables(); + variables.put(Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString(index)); + variables.put(Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName()); + variables.put(Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString(slaves.length)); + variables.put(Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N"); + + // Parameters override the variables but they need to pass over the configuration too... + // + Map params = slaveTransExecutionConfiguration.getParams(); + TransMeta ot = transSplitter.getOriginalTransformation(); + for (String param : ot.listParameters()) { + String value = + Const.NVL(ot.getParameterValue(param), Const.NVL(ot.getParameterDefault(param), ot + .getVariable(param))); + params.put(param, value); + } + + String slaveReply = + slaves[index].sendXML(transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y"); + WebResult webResult = WebResult.fromXMLString(slaveReply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("An error occurred sending a slave transformation: " + webResult + .getMessage()); + } + carteObjectMap.put(slaveTrans, webResult.getId()); + } catch (Throwable t) { + errors[index] = t; + } + } + }; + threads[i] = new Thread(runnable); + } + } + + // Start the slaves + for (int i = 0; i < threads.length; i++) { + if (threads[i] != null) { + threads[i].start(); + } + } + + // Wait until the slaves report back... + // Sending the XML over is the heaviest part + // Later we can do the others as well... + // + for (int i = 0; i < threads.length; i++) { + if (threads[i] != null) { + threads[i].join(); + if (errors[i] != null) { + throw new KettleException(errors[i]); + } + } + } + + if (executionConfiguration.isClusterPosting()) { + if (executionConfiguration.isClusterPreparing()) { + // Prepare the master... + if (masterSteps.size() > 0) { // If there is something that needs to be done on the master... + String carteObjectId = carteObjectMap.get(master); + String masterReply = + masterServer.execService(PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + master.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y"); + WebResult webResult = WebResult.fromXMLString(masterReply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException( + "An error occurred while preparing the execution of the master transformation: " + webResult + .getMessage()); + } + } + + // Prepare the slaves + // WG: Should these be threaded like the above initialization? 
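// Usage sketch for the clustered execution entry points in this class: split the
// transformation, post/prepare/start it on the cluster, then poll until it finishes.
// The setClusterPosting/Preparing/Starting setters are assumed counterparts of the
// isClusterPosting()/isClusterPreparing()/isClusterStarting() checks used in this method;
// "transMeta" and "log" are assumed to be supplied by the caller.
TransExecutionConfiguration clusterConfig = new TransExecutionConfiguration();
clusterConfig.setClusterPosting(true);
clusterConfig.setClusterPreparing(true);
clusterConfig.setClusterStarting(true);
TransSplitter splitter = Trans.executeClustered(transMeta, clusterConfig);
long clusterErrors = Trans.monitorClusteredTransformation(log, splitter, null, 5);
Result clusterResult = Trans.getClusteredTransformationResult(log, splitter, null);
log.logBasic("Clustered run finished with " + clusterErrors + " error(s), "
    + clusterResult.getNrLinesWritten() + " line(s) written");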
+ for (int i = 0; i < slaves.length; i++) { + TransMeta slaveTrans = transSplitter.getSlaveTransMap().get(slaves[i]); + String carteObjectId = carteObjectMap.get(slaveTrans); + String slaveReply = + slaves[i].execService(PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + slaveTrans.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y"); + WebResult webResult = WebResult.fromXMLString(slaveReply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("An error occurred while preparing the execution of a slave transformation: " + + webResult.getMessage()); + } + } + } + + if (executionConfiguration.isClusterStarting()) { + // Start the master... + if (masterSteps.size() > 0) { // If there is something that needs to be done on the master... + String carteObjectId = carteObjectMap.get(master); + String masterReply = + masterServer.execService(StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + master.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y"); + WebResult webResult = WebResult.fromXMLString(masterReply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("An error occurred while starting the execution of the master transformation: " + + webResult.getMessage()); + } + } + + // Start the slaves + // WG: Should these be threaded like the above initialization? + for (int i = 0; i < slaves.length; i++) { + TransMeta slaveTrans = transSplitter.getSlaveTransMap().get(slaves[i]); + String carteObjectId = carteObjectMap.get(slaveTrans); + String slaveReply = + slaves[i].execService(StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( + slaveTrans.getName(), "UTF-8") + "&id=" + URLEncoder.encode(carteObjectId, "UTF-8") + "&xml=Y"); + WebResult webResult = WebResult.fromXMLString(slaveReply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("An error occurred while starting the execution of a slave transformation: " + + webResult.getMessage()); + } + } + } + } + } catch (KettleException ke) { + throw ke; + } catch (Exception e) { + throw new KettleException("There was an error during transformation split", e); + } + } + + /** + * Monitors a clustered transformation every second, after all the transformations in a cluster schema are running. + *
+ * Now we should verify that they are all running as they should.
+ * If a transformation has an error, we should kill them all.
+ * This should happen in a separate thread to prevent blocking of the UI.
+ *
+ * When the master and slave transformations have all finished, we should also run
+ * a cleanup on those transformations to release sockets, etc.
+ *
+ * + * @param log the log interface channel + * @param transSplitter the transformation splitter object + * @param parentJob the parent job when executed in a job, otherwise just set to null + * @return the number of errors encountered + */ + public static final long monitorClusteredTransformation(LogChannelInterface log, TransSplitter transSplitter, + Job parentJob) { + return monitorClusteredTransformation(log, transSplitter, parentJob, 1); // monitor every second + } + + /** + * Monitors a clustered transformation every second, after all the transformations in a cluster schema are running. + *
+ * Now we should verify that they are all running as they should.
+ * If a transformation has an error, we should kill them all.
+ * This should happen in a separate thread to prevent blocking of the UI.
+ *
+ * When the master and slave transformations have all finished, we should also run
+ * a cleanup on those transformations to release sockets, etc.
+ *
+ * + * @param log the subject to use for logging + * @param transSplitter the transformation splitter object + * @param parentJob the parent job when executed in a job, otherwise just set to null + * @param sleepTimeSeconds the sleep time in seconds in between slave transformation status polling + * @return the number of errors encountered + */ + public static final long monitorClusteredTransformation(LogChannelInterface log, TransSplitter transSplitter, + Job parentJob, int sleepTimeSeconds) { + long errors = 0L; + + // + // See if the remote transformations have finished. + // We could just look at the master, but I doubt that that is enough in all + // situations. + // + SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask + // these guys + TransMeta[] slaves = transSplitter.getSlaves(); + Map carteObjectMap = transSplitter.getCarteObjectMap(); + + SlaveServer masterServer; + try { + masterServer = transSplitter.getMasterServer(); + } catch (KettleException e) { + log.logError("Error getting the master server", e); + masterServer = null; + errors++; + } + TransMeta masterTransMeta = transSplitter.getMaster(); + + boolean allFinished = false; + while (!allFinished && errors == 0 && (parentJob == null || !parentJob.isStopped())) { + allFinished = true; + errors = 0L; + + // Slaves first... + // + for (int s = 0; s < slaveServers.length && allFinished && errors == 0; s++) { + try { + String carteObjectId = carteObjectMap.get(slaves[s]); + SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus(slaves[s].getName(), carteObjectId, 0); + if (transStatus.isRunning()) { + if (log.isDetailed()) { + log.logDetailed("Slave transformation on '" + slaveServers[s] + "' is still running."); + } + allFinished = false; + } else { + if (log.isDetailed()) { + log.logDetailed("Slave transformation on '" + slaveServers[s] + "' has finished."); + } + } + errors += transStatus.getNrStepErrors(); + } catch (Exception e) { + errors += 1; + log.logError("Unable to contact slave server '" + slaveServers[s].getName() + + "' to check slave transformation : " + e.toString()); + } + } + + // Check the master too + if (allFinished && errors == 0 && masterTransMeta != null && masterTransMeta.nrSteps() > 0) { + try { + String carteObjectId = carteObjectMap.get(masterTransMeta); + SlaveServerTransStatus transStatus = + masterServer.getTransStatus(masterTransMeta.getName(), carteObjectId, 0); + if (transStatus.isRunning()) { + if (log.isDetailed()) { + log.logDetailed("Master transformation is still running."); + } + allFinished = false; + } else { + if (log.isDetailed()) { + log.logDetailed("Master transformation has finished."); + } + } + Result result = transStatus.getResult(transSplitter.getOriginalTransformation()); + errors += result.getNrErrors(); + } catch (Exception e) { + errors += 1; + log.logError("Unable to contact master server '" + masterServer.getName() + + "' to check master transformation : " + e.toString()); + } + } + + if ((parentJob != null && parentJob.isStopped()) || errors != 0) { + // + // Stop all slaves and the master on the slave servers + // + for (int s = 0; s < slaveServers.length && allFinished && errors == 0; s++) { + try { + String carteObjectId = carteObjectMap.get(slaves[s]); + WebResult webResult = slaveServers[s].stopTransformation(slaves[s].getName(), carteObjectId); + if (!WebResult.STRING_OK.equals(webResult.getResult())) { + log.logError("Unable to stop slave transformation '" + slaves[s].getName() + "' : " + webResult + .getMessage()); + } + } 
catch (Exception e) { + errors += 1; + log.logError("Unable to contact slave server '" + slaveServers[s].getName() + "' to stop transformation : " + + e.toString()); + } + } + + try { + String carteObjectId = carteObjectMap.get(masterTransMeta); + WebResult webResult = masterServer.stopTransformation(masterTransMeta.getName(), carteObjectId); + if (!WebResult.STRING_OK.equals(webResult.getResult())) { + log.logError("Unable to stop master transformation '" + masterServer.getName() + "' : " + webResult + .getMessage()); + } + } catch (Exception e) { + errors += 1; + log.logError("Unable to contact master server '" + masterServer.getName() + "' to stop the master : " + e + .toString()); + } + } + + // + // Keep waiting until all transformations have finished + // If needed, we stop them again and again until they yield. + // + if (!allFinished) { + // Not finished or error: wait a bit longer + if (log.isDetailed()) { + log.logDetailed("Clustered transformation is still running, waiting a few seconds..."); + } + try { + Thread.sleep(sleepTimeSeconds * 2000); + } catch (Exception e) { + // Ignore errors + } // Check all slaves every x seconds. + } + } + + log.logBasic("All transformations in the cluster have finished."); + + errors += cleanupCluster(log, transSplitter); + + return errors; + } + + /** + * Cleanup the cluster, including the master and all slaves, and return the number of errors that occurred. + * + * @param log the log channel interface + * @param transSplitter the TransSplitter object + * @return the number of errors that occurred in the clustered transformation + */ + public static int cleanupCluster(LogChannelInterface log, TransSplitter transSplitter) { + + SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); + TransMeta[] slaves = transSplitter.getSlaves(); + SlaveServer masterServer; + try { + masterServer = transSplitter.getMasterServer(); + } catch (KettleException e) { + log.logError("Unable to obtain the master server from the cluster", e); + return 1; + } + TransMeta masterTransMeta = transSplitter.getMaster(); + int errors = 0; + + // All transformations have finished, with or without error. + // Now run a cleanup on all the transformation on the master and the slaves. + // + // Slaves first... + // + for (int s = 0; s < slaveServers.length; s++) { + try { + cleanupSlaveServer(transSplitter, slaveServers[s], slaves[s]); + } catch (Exception e) { + errors++; + log.logError("Unable to contact slave server '" + slaveServers[s].getName() + + "' to clean up slave transformation", e); + } + } + + // Clean up the master too + // + if (masterTransMeta != null && masterTransMeta.nrSteps() > 0) { + try { + cleanupSlaveServer(transSplitter, masterServer, masterTransMeta); + } catch (Exception e) { + errors++; + log.logError("Unable to contact master server '" + masterServer.getName() + + "' to clean up master transformation", e); + } + + // Also de-allocate all ports used for this clustered transformation on the master. 
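// Sketch for the cleanup and result-collection helpers in this class: when the built-in
// monitor loop is not used, remote results can still be gathered and cluster resources
// released explicitly. "splitter" is assumed to be the TransSplitter from executeClustered().
Result remoteResult = Trans.getClusteredTransformationResult(log, splitter, null, true);
if (remoteResult.getNrErrors() > 0) {
  log.logError("Clustered run reported " + remoteResult.getNrErrors() + " error(s)");
}
int cleanupErrors = Trans.cleanupCluster(log, splitter);
log.logBasic("Cluster cleanup finished with " + cleanupErrors + " error(s)");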
+ // + try { + // Deallocate all ports belonging to this clustered run, not anything else + // + masterServer.deAllocateServerSockets(transSplitter.getOriginalTransformation().getName(), transSplitter + .getClusteredRunId()); + } catch (Exception e) { + errors++; + log.logError("Unable to contact master server '" + masterServer.getName() + + "' to clean up port sockets for transformation'" + transSplitter.getOriginalTransformation().getName() + + "'", e); + } } + return errors; + } + + /** + * Cleanup the slave server as part of a clustered transformation. + * + * @param transSplitter the TransSplitter object + * @param slaveServer the slave server + * @param slaveTransMeta the slave transformation meta-data + * @throws KettleException if any errors occur during cleanup + */ + public static void cleanupSlaveServer(TransSplitter transSplitter, SlaveServer slaveServer, + TransMeta slaveTransMeta) throws KettleException { + String transName = slaveTransMeta.getName(); + try { + String carteObjectId = transSplitter.getCarteObjectMap().get(slaveTransMeta); + WebResult webResult = slaveServer.cleanupTransformation(transName, carteObjectId); + if (!WebResult.STRING_OK.equals(webResult.getResult())) { + throw new KettleException("Unable to run clean-up on slave server '" + slaveServer + "' for transformation '" + + transName + "' : " + webResult.getMessage()); + } + } catch (Exception e) { + throw new KettleException("Unexpected error contacting slave server '" + slaveServer + + "' to clear up transformation '" + transName + "'", e); + } + } + + /** + * Gets the clustered transformation result. + * + * @param log the log channel interface + * @param transSplitter the TransSplitter object + * @param parentJob the parent job + * @return the clustered transformation result + */ + public static final Result getClusteredTransformationResult(LogChannelInterface log, TransSplitter transSplitter, + Job parentJob) { + return getClusteredTransformationResult(log, transSplitter, parentJob, false); + } + + /** + * Gets the clustered transformation result. + * + * @param log the log channel interface + * @param transSplitter the TransSplitter object + * @param parentJob the parent job + * @param loggingRemoteWork log remote execution logs locally + * @return the clustered transformation result + */ + public static final Result getClusteredTransformationResult(LogChannelInterface log, TransSplitter transSplitter, + Job parentJob, boolean loggingRemoteWork) { + Result result = new Result(); // - // Get the date range from the logging table: from the last end_date to now. (currentDate) + // See if the remote transformations have finished. + // We could just look at the master, but I doubt that that is enough in all situations. // - Object[] lastr = - transLogTableDatabaseConnection.getLastLogDate( logSchemaAndTable, transMeta.getName(), false, - LogStatus.END ); - if ( lastr != null && lastr.length > 0 ) { - startDate = (Date) lastr[0]; - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StartDateFound" ) + startDate ); - } + SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask these guys + TransMeta[] slaves = transSplitter.getSlaves(); + + SlaveServer masterServer; + try { + masterServer = transSplitter.getMasterServer(); + } catch (KettleException e) { + log.logError("Error getting the master server", e); + masterServer = null; + result.setNrErrors(result.getNrErrors() + 1); } + TransMeta master = transSplitter.getMaster(); + // Slaves first... 
// - // OK, we have a date-range. - // However, perhaps we need to look at a table before we make a final judgment? - // - if ( transMeta.getMaxDateConnection() != null && transMeta.getMaxDateTable() != null && transMeta - .getMaxDateTable().length() > 0 && transMeta.getMaxDateField() != null && transMeta.getMaxDateField() - .length() > 0 ) { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LookingForMaxdateConnection", "" + transMeta - .getMaxDateConnection() ) ); - } - DatabaseMeta maxcon = transMeta.getMaxDateConnection(); - if ( maxcon != null ) { - Database maxdb = new Database( this, maxcon ); - maxdb.shareVariablesWith( this ); + for (int s = 0; s < slaveServers.length; s++) { try { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningMaximumDateConnection" ) ); - } - maxdb.connect(); - maxdb.setCommit( logCommitSize ); - - // - // Determine the endDate by looking at a field in a table... - // - String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable(); - RowMetaAndData r1 = maxdb.getOneRow( sql ); - if ( r1 != null ) { - // OK, we have a value, what's the offset? - Date maxvalue = r1.getRowMeta().getDate( r1.getData(), 0 ); - if ( maxvalue != null ) { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection" ) - + r1 ); - } - endDate.setTime( (long) ( maxvalue.getTime() + ( transMeta.getMaxDateOffset() * 1000 ) ) ); - } - } else { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection" ) ); - } - } - } catch ( KettleException e ) { - throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorConnectingToDatabase", - "" + transMeta.getMaxDateConnection() ), e ); - } finally { - maxdb.disconnect(); - } - } else { - throw new KettleTransException( BaseMessages.getString( PKG, - "Trans.Exception.MaximumDateConnectionCouldNotBeFound", "" + transMeta.getMaxDateConnection() ) ); - } - } - - // Determine the last date of all dependend tables... - // Get the maximum in depdate... - if ( transMeta.nrDependencies() > 0 ) { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.CheckingForMaxDependencyDate" ) ); - } - // - // Maybe one of the tables where this transformation is dependent on has changed? - // If so we need to change the start-date! - // - depDate = Const.MIN_DATE; - Date maxdepdate = Const.MIN_DATE; - if ( lastr != null && lastr.length > 0 ) { - Date dep = (Date) lastr[1]; // #1: last depdate - if ( dep != null ) { - maxdepdate = dep; - depDate = dep; - } - } - - for ( int i = 0; i < transMeta.nrDependencies(); i++ ) { - TransDependency td = transMeta.getDependency( i ); - DatabaseMeta depcon = td.getDatabase(); - if ( depcon != null ) { - Database depdb = new Database( this, depcon ); - try { - depdb.connect(); - depdb.setCommit( logCommitSize ); - - String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename(); - RowMetaAndData r1 = depdb.getOneRow( sql ); - if ( r1 != null ) { - // OK, we have a row, get the result! - Date maxvalue = (Date) r1.getData()[0]; - if ( maxvalue != null ) { - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDateFromTable", td.getTablename(), - "." 
+ td.getFieldname(), " = " + maxvalue.toString() ) ); - } - if ( maxvalue.getTime() > maxdepdate.getTime() ) { - maxdepdate = maxvalue; - } - } else { - throw new KettleTransException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td - .getTablename() + ".", td.getFieldname() ) ); - } - } else { - throw new KettleTransException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td - .getTablename() + ".", td.getFieldname() ) ); + // Get the detailed status of the slave transformation... + // + SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus(slaves[s].getName(), "", 0); + Result transResult = transStatus.getResult(slaves[s]); + + result.add(transResult); + + if (loggingRemoteWork) { + log.logBasic("-- Slave : " + slaveServers[s].getName()); + log.logBasic(transStatus.getLoggingString()); } - } catch ( KettleException e ) { - throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorInDatabase", "" + td - .getDatabase() ), e ); - } finally { - depdb.disconnect(); - } - } else { - throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ConnectionCouldNotBeFound", - "" + td.getDatabase() ) ); - } - if ( log.isDetailed() ) { - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.Maxdepdate" ) + ( XMLHandler.date2string( - maxdepdate ) ) ); - } - } - - // OK, so we now have the maximum depdate; - // If it is larger, it means we have to read everything back in again. - // Maybe something has changed that we need! - // - if ( maxdepdate.getTime() > depDate.getTime() ) { - depDate = maxdepdate; - startDate = Const.MIN_DATE; - } - } else { - depDate = currentDate; + } catch (Exception e) { + result.setNrErrors(result.getNrErrors() + 1); + log.logError("Unable to contact slave server '" + slaveServers[s].getName() + + "' to get result of slave transformation : " + e.toString()); + } } - } - // OK, now we have a date-range. See if we need to set a maximum! - if ( transMeta.getMaxDateDifference() > 0.0 && // Do we have a difference specified? - startDate.getTime() > Const.MIN_DATE.getTime() // Is the startdate > Minimum? - ) { - // See if the end-date is larger then Start_date + DIFF? - Date maxdesired = new Date( startDate.getTime() + ( (long) transMeta.getMaxDateDifference() * 1000 ) ); - - // If this is the case: lower the end-date. Pick up the next 'region' next time around. - // We do this to limit the workload in a single update session (e.g. for large fact tables) + // Clean up the master too // - if ( endDate.compareTo( maxdesired ) > 0 ) { - endDate = maxdesired; + if (master != null && master.nrSteps() > 0) { + try { + // Get the detailed status of the slave transformation... 
+ // + SlaveServerTransStatus transStatus = masterServer.getTransStatus(master.getName(), "", 0); + Result transResult = transStatus.getResult(master); + + result.add(transResult); + + if (loggingRemoteWork) { + log.logBasic("-- Master : " + masterServer.getName()); + log.logBasic(transStatus.getLoggingString()); + } + } catch (Exception e) { + result.setNrErrors(result.getNrErrors() + 1); + log.logError("Unable to contact master server '" + masterServer.getName() + + "' to get result of master transformation : " + e.toString()); + } } - } - } catch ( KettleException e ) { - throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorCalculatingDateRange", - logTable ), e ); - } + return result; + } + + /** + * Send the transformation for execution to a Carte slave server. + * + * @param transMeta the transformation meta-data + * @param executionConfiguration the transformation execution configuration + * @param repository the repository + * @return The Carte object ID on the server. + * @throws KettleException if any errors occur during the dispatch to the slave server + */ + public static String sendToSlaveServer(TransMeta transMeta, TransExecutionConfiguration executionConfiguration, + Repository repository, IMetaStore metaStore) throws KettleException { + String carteObjectId; + SlaveServer slaveServer = executionConfiguration.getRemoteServer(); + + if (slaveServer == null) { + throw new KettleException("No slave server specified"); + } + if (Const.isEmpty(transMeta.getName())) { + throw new KettleException("The transformation needs a name to uniquely identify it by on the remote server."); + } - // Be careful, We DO NOT close the trans log table database connection!!! - // It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions. + try { + // Inject certain internal variables to make it more intuitive. + // + Map vars = new HashMap(); - } + for (String var : Const.INTERNAL_TRANS_VARIABLES) { + vars.put(var, transMeta.getVariable(var)); + } + for (String var : Const.INTERNAL_JOB_VARIABLES) { + vars.put(var, transMeta.getVariable(var)); + } - /** - * Begin processing. Also handle logging operations related to the start of the transformation - * - * @throws KettleTransException - * the kettle trans exception - */ - public void beginProcessing() throws KettleTransException { - TransLogTable transLogTable = transMeta.getTransLogTable(); - int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 ); + executionConfiguration.getVariables().putAll(vars); + slaveServer.injectVariables(executionConfiguration.getVariables()); - try { - String logTable = transLogTable.getActualTableName(); + slaveServer.getLogChannel().setLogLevel(executionConfiguration.getLogLevel()); - SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT ); - log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationCanBeReplayed" ) + df.format( - currentDate ) ); + if (executionConfiguration.isPassingExport()) { - try { - if ( transLogTableDatabaseConnection != null && !Const.isEmpty( logTable ) && !Const.isEmpty( transMeta - .getName() ) ) { - transLogTableDatabaseConnection.writeLogRecord( transLogTable, LogStatus.START, this, null ); + // First export the job... + // + FileObject tempFile = + KettleVFS.createTempFile("transExport", ".zip", System.getProperty("java.io.tmpdir"), transMeta); - // Pass in a commit to release transaction locks and to allow a user to actually see the log record. 
- // - if ( !transLogTableDatabaseConnection.isAutoCommit() ) { - transLogTableDatabaseConnection.commitLog( true, transLogTable ); - } + TopLevelResource topLevelResource = + ResourceUtil.serializeResourceExportInterface(tempFile.getName().toString(), transMeta, transMeta, + repository, metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME); - // If we need to do periodic logging, make sure to install a timer for this... - // - if ( intervalInSeconds > 0 ) { - final Timer timer = new Timer( getName() + " - interval logging timer" ); - TimerTask timerTask = new TimerTask() { - public void run() { - try { - endProcessing(); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Exception.UnableToPerformIntervalLogging" ), e ); - // Also stop the show... - // - errors.incrementAndGet(); - stopAll(); + // Send the zip file over to the slave server... + // + String result = + slaveServer.sendExport(topLevelResource.getArchiveName(), AddExportServlet.TYPE_TRANS, topLevelResource + .getBaseResourceName()); + WebResult webResult = WebResult.fromXMLString(result); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error passing the exported transformation to the remote server: " + + Const.CR + webResult.getMessage()); } - } - }; - timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 ); - - addTransListener( new TransAdapter() { - public void transFinished( Trans trans ) { - timer.cancel(); - } - } ); - } + carteObjectId = webResult.getId(); + } else { - // Add a listener to make sure that the last record is also written when transformation finishes... - // - addTransListener( new TransAdapter() { - public void transFinished( Trans trans ) throws KettleException { - try { - endProcessing(); + // Now send it off to the remote server... 
+ // + String xml = new TransConfiguration(transMeta, executionConfiguration).getXML(); + String reply = slaveServer.sendXML(xml, RegisterTransServlet.CONTEXT_PATH + "/?xml=Y"); + WebResult webResult = WebResult.fromXMLString(reply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error posting the transformation on the remote server: " + Const.CR + + webResult.getMessage()); + } + carteObjectId = webResult.getId(); + } - lastWrittenStepPerformanceSequenceNr = - writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.END ); + // Prepare the transformation + // + String reply = + slaveServer.execService(PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transMeta + .getName(), "UTF-8") + "&xml=Y&id=" + carteObjectId); + WebResult webResult = WebResult.fromXMLString(reply); + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error preparing the transformation for excution on the remote server: " + + Const.CR + webResult.getMessage()); + } - } catch ( KettleException e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e ); - } + // Start the transformation + // + reply = + slaveServer.execService(StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transMeta + .getName(), "UTF-8") + "&xml=Y&id=" + carteObjectId); + webResult = WebResult.fromXMLString(reply); + + if (!webResult.getResult().equalsIgnoreCase(WebResult.STRING_OK)) { + throw new KettleException("There was an error starting the transformation on the remote server: " + Const.CR + + webResult.getMessage()); } - } ); + return carteObjectId; + } catch (KettleException ke) { + throw ke; + } catch (Exception e) { + throw new KettleException(e); } + } - // If we need to write out the step logging information, do so at the end of the transformation too... - // - StepLogTable stepLogTable = transMeta.getStepLogTable(); - if ( stepLogTable.isDefined() ) { - addTransListener( new TransAdapter() { - public void transFinished( Trans trans ) throws KettleException { - try { - writeStepLogInformation(); - } catch ( KettleException e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e ); - } + /** + * Checks whether the transformation is ready to start (i.e. execution preparation was successful) + * + * @return true if the transformation was prepared for execution successfully, false otherwise + * @see org.pentaho.di.trans.Trans#prepareExecution(String[]) + */ + public boolean isReadyToStart() { + return readyToStart; + } + + /** + * Sets the internal kettle variables. + * + * @param var the new internal kettle variables + */ + public void setInternalKettleVariables(VariableSpace var) { + if (transMeta != null && !Const.isEmpty(transMeta.getFilename())) { // we have a finename that's defined. 
+ try { + FileObject fileObject = KettleVFS.getFileObject(transMeta.getFilename(), var); + FileName fileName = fileObject.getName(); + + // The filename of the transformation + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName()); + + // The directory of the transformation + FileName fileDir = fileName.getParent(); + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI()); + } catch (KettleFileException e) { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, ""); } - } ); + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, ""); } - // If we need to write the log channel hierarchy and lineage information, add a listener for that too... - // - ChannelLogTable channelLogTable = transMeta.getChannelLogTable(); - if ( channelLogTable.isDefined() ) { - addTransListener( new TransAdapter() { - public void transFinished( Trans trans ) throws KettleException { - try { - writeLogChannelInformation(); - } catch ( KettleException e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e ); - } - } - } ); + boolean hasRepoDir = transMeta.getRepositoryDirectory() != null && transMeta.getRepository() != null; + + // The name of the transformation + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL(transMeta.getName(), "")); + + // setup fallbacks + if (hasRepoDir) { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY)); + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY)); } - // See if we need to write the step performance records at intervals too... - // - PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable(); - int perfLogInterval = Const.toInt( environmentSubstitute( performanceLogTable.getLogInterval() ), -1 ); - if ( performanceLogTable.isDefined() && perfLogInterval > 0 ) { - final Timer timer = new Timer( getName() + " - step performance log interval timer" ); - TimerTask timerTask = new TimerTask() { - public void run() { - try { - lastWrittenStepPerformanceSequenceNr = - writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING ); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, - "Trans.Exception.UnableToPerformIntervalPerformanceLogging" ), e ); - // Also stop the show... - // - errors.incrementAndGet(); - stopAll(); - } + // TODO PUT THIS INSIDE OF THE "IF" + // The name of the directory in the repository + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, transMeta + .getRepositoryDirectory() != null ? transMeta.getRepositoryDirectory().getPath() : ""); + + // Here we don't clear the definition of the job specific parameters, as they may come in handy. + // A transformation can be called from a job and may inherit the job internal variables + // but the other around is not possible. 
+ + if (hasRepoDir) { + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY)); + if ("/".equals(variables.getVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY))) { + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, ""); } - }; - timer.schedule( timerTask, perfLogInterval * 1000, perfLogInterval * 1000 ); + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( + Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY)); + } + } - addTransListener( new TransAdapter() { - public void transFinished( Trans trans ) { - timer.cancel(); + /** + * Copies variables from a given variable space to this transformation. + * + * @param space the variable space + * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) + */ + public void copyVariablesFrom(VariableSpace space) { + variables.copyVariablesFrom(space); + } + + /** + * Substitutes any variable values into the given string, and returns the resolved string. + * + * @param aString the string to resolve against environment variables + * @return the string after variables have been resolved/susbstituted + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) + */ + public String environmentSubstitute(String aString) { + return variables.environmentSubstitute(aString); + } + + /** + * Substitutes any variable values into each of the given strings, and returns an array containing the resolved + * string(s). + * + * @param aString an array of strings to resolve against environment variables + * @return the array of strings after variables have been resolved/susbstituted + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) + */ + public String[] environmentSubstitute(String[] aString) { + return variables.environmentSubstitute(aString); + } + + public String fieldSubstitute(String aString, RowMetaInterface rowMeta, Object[] rowData) + throws KettleValueException { + return variables.fieldSubstitute(aString, rowMeta, rowData); + } + + /** + * Gets the parent variable space. + * + * @return the parent variable space + * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() + */ + public VariableSpace getParentVariableSpace() { + return variables.getParentVariableSpace(); + } + + /** + * Sets the parent variable space. + * + * @param parent the new parent variable space + * @see org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace( + *org.pentaho.di.core.variables.VariableSpace) + */ + public void setParentVariableSpace(VariableSpace parent) { + variables.setParentVariableSpace(parent); + } + + /** + * Gets the value of the specified variable, or returns a default value if no such variable exists. + * + * @param variableName the variable name + * @param defaultValue the default value + * @return the value of the specified variable, or returns a default value if no such variable exists + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) + */ + public String getVariable(String variableName, String defaultValue) { + return variables.getVariable(variableName, defaultValue); + } + + /** + * Gets the value of the specified variable, or returns a default value if no such variable exists. 
+ * + * @param variableName the variable name + * @return the value of the specified variable, or returns a default value if no such variable exists + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) + */ + public String getVariable(String variableName) { + return variables.getVariable(variableName); + } + + /** + * Returns a boolean representation of the specified variable after performing any necessary substitution. Truth + * values include case-insensitive versions of "Y", "YES", "TRUE" or "1". + * + * @param variableName the variable name + * @param defaultValue the default value + * @return a boolean representation of the specified variable after performing any necessary substitution + * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) + */ + public boolean getBooleanValueOfVariable(String variableName, boolean defaultValue) { + if (!Const.isEmpty(variableName)) { + String value = environmentSubstitute(variableName); + if (!Const.isEmpty(value)) { + return ValueMeta.convertStringToBoolean(value); } - } ); } - } catch ( KettleException e ) { - throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", - logTable ), e ); - } finally { - // If we use interval logging, we keep the connection open for performance reasons... - // - if ( transLogTableDatabaseConnection != null && ( intervalInSeconds <= 0 ) ) { - transLogTableDatabaseConnection.disconnect(); - transLogTableDatabaseConnection = null; - } - } - } catch ( KettleException e ) { - throw new KettleTransException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToBeginProcessingTransformation" ), e ); - } - } - - /** - * Writes log channel information to a channel logging table (if one has been configured). - * - * @throws KettleException - * if any errors occur during logging - */ - protected void writeLogChannelInformation() throws KettleException { - Database db = null; - ChannelLogTable channelLogTable = transMeta.getChannelLogTable(); - - // PDI-7070: If parent trans or job has the same channel logging info, don't duplicate log entries - Trans t = getParentTrans(); - if ( t != null ) { - if ( channelLogTable.equals( t.getTransMeta().getChannelLogTable() ) ) { - return; - } - } - - Job j = getParentJob(); - - if ( j != null ) { - if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) { - return; - } - } - // end PDI-7070 - - try { - db = new Database( this, channelLogTable.getDatabaseMeta() ); - db.shareVariablesWith( this ); - db.connect(); - db.setCommit( logCommitSize ); - - List loggingHierarchyList = getLoggingHierarchy(); - for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) { - db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null ); - } - - // Also time-out the log records in here... - // - db.cleanupLogRecords( channelLogTable ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e ); - } finally { - if ( !db.isAutoCommit() ) { - db.commit( true ); - } - db.disconnect(); - } - } - - /** - * Writes step information to a step logging table (if one has been configured). 
- * - * @throws KettleException - * if any errors occur during logging - */ - protected void writeStepLogInformation() throws KettleException { - Database db = null; - StepLogTable stepLogTable = getTransMeta().getStepLogTable(); - try { - db = createDataBase( stepLogTable.getDatabaseMeta() ); - db.shareVariablesWith( this ); - db.connect(); - db.setCommit( logCommitSize ); - - for ( StepMetaDataCombi combi : getSteps() ) { - db.writeLogRecord( stepLogTable, LogStatus.START, combi, null ); - } - - db.cleanupLogRecords( stepLogTable ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e ); - } finally { - if ( !db.isAutoCommit() ) { - db.commit( true ); - } - db.disconnect(); - } - - } - - protected Database createDataBase( DatabaseMeta meta ) { - return new Database( this, meta ); - } - - protected synchronized void writeMetricsInformation() throws KettleException { - // - List metricsList = - MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START ); - if ( !metricsList.isEmpty() ) { - System.out.println( metricsList.get( 0 ) ); - } - - metricsList = - MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START ); - if ( !metricsList.isEmpty() ) { - System.out.println( metricsList.get( 0 ) ); - } - - long total = 0; - metricsList = - MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START ); - if ( metricsList != null ) { - for ( MetricsDuration duration : metricsList ) { - total += duration.getDuration(); - System.out.println( " - " + duration.toString() + " Total=" + total ); - } - } - - Database db = null; - MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable(); - try { - db = new Database( this, metricsLogTable.getDatabaseMeta() ); - db.shareVariablesWith( this ); - db.connect(); - db.setCommit( logCommitSize ); - - List logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() ); - for ( String logChannelId : logChannelIds ) { - Deque snapshotList = - MetricsRegistry.getInstance().getSnapshotLists().get( logChannelId ); - if ( snapshotList != null ) { - Iterator iterator = snapshotList.iterator(); - while ( iterator.hasNext() ) { - MetricsSnapshotInterface snapshot = iterator.next(); - db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null ); - } - } - - Map snapshotMap = - MetricsRegistry.getInstance().getSnapshotMaps().get( logChannelId ); - if ( snapshotMap != null ) { - synchronized ( snapshotMap ) { - Iterator iterator = snapshotMap.values().iterator(); - while ( iterator.hasNext() ) { - MetricsSnapshotInterface snapshot = iterator.next(); - db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null ); - } - } - } - } - - // Also time-out the log records in here... - // - db.cleanupLogRecords( metricsLogTable ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.UnableToWriteMetricsInformationToLogTable" ), e ); - } finally { - if ( !db.isAutoCommit() ) { - db.commit( true ); - } - db.disconnect(); - } - } - - /** - * Gets the result of the transformation. The Result object contains such measures as the number of errors, number of - * lines read/written/input/output/updated/rejected, etc. 
- * - * @return the Result object containing resulting measures from execution of the transformation - */ - public Result getResult() { - if ( steps == null ) { - return null; - } - - Result result = new Result(); - result.setNrErrors( errors.longValue() ); - result.setResult( errors.longValue() == 0 ); - TransLogTable transLogTable = transMeta.getTransLogTable(); - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface step = sid.step; - - result.setNrErrors( result.getNrErrors() + sid.step.getErrors() ); - result.getResultFiles().putAll( step.getResultFiles() ); - - if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_READ ) ) ) { - result.setNrLinesRead( result.getNrLinesRead() + step.getLinesRead() ); - } - if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_INPUT ) ) ) { - result.setNrLinesInput( result.getNrLinesInput() + step.getLinesInput() ); - } - if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_WRITTEN ) ) ) { - result.setNrLinesWritten( result.getNrLinesWritten() + step.getLinesWritten() ); - } - if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_OUTPUT ) ) ) { - result.setNrLinesOutput( result.getNrLinesOutput() + step.getLinesOutput() ); - } - if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_UPDATED ) ) ) { - result.setNrLinesUpdated( result.getNrLinesUpdated() + step.getLinesUpdated() ); - } - if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_REJECTED ) ) ) { - result.setNrLinesRejected( result.getNrLinesRejected() + step.getLinesRejected() ); - } - } - - result.setRows( resultRows ); - if ( !Const.isEmpty( resultFiles ) ) { - result.setResultFiles( new HashMap() ); - for ( ResultFile resultFile : resultFiles ) { - result.getResultFiles().put( resultFile.toString(), resultFile ); - } - } - result.setStopped( isStopped() ); - result.setLogChannelId( log.getLogChannelId() ); - - return result; - } - - /** - * End processing. Also handle any logging operations associated with the end of a transformation - * - * @return true if all end processing is successful, false otherwise - * @throws KettleException - * if any errors occur during processing - */ - private synchronized boolean endProcessing() throws KettleException { - LogStatus status; - - if ( isFinished() ) { - if ( isStopped() ) { - status = LogStatus.STOP; - } else { - status = LogStatus.END; - } - } else if ( isPaused() ) { - status = LogStatus.PAUSED; - } else { - status = LogStatus.RUNNING; - } - - TransLogTable transLogTable = transMeta.getTransLogTable(); - int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 ); - - logDate = new Date(); - - // OK, we have some logging to do... - // - DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta(); - String logTable = transMeta.getTransLogTable().getActualTableName(); - if ( logcon != null ) { - Database ldb = null; - - try { - // Let's not reconnect/disconnect all the time for performance reasons! 
- // - if ( transLogTableDatabaseConnection == null ) { - ldb = new Database( this, logcon ); - ldb.shareVariablesWith( this ); - ldb.connect(); - ldb.setCommit( logCommitSize ); - transLogTableDatabaseConnection = ldb; - } else { - ldb = transLogTableDatabaseConnection; + return defaultValue; + } + + /** + * Sets the values of the transformation's variables to the values from the parent variables. + * + * @param parent the parent + * @see org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom( + *org.pentaho.di.core.variables.VariableSpace) + */ + public void initializeVariablesFrom(VariableSpace parent) { + variables.initializeVariablesFrom(parent); + } + + /** + * Gets a list of variable names for the transformation. + * + * @return a list of variable names + * @see org.pentaho.di.core.variables.VariableSpace#listVariables() + */ + public String[] listVariables() { + return variables.listVariables(); + } + + /** + * Sets the value of the specified variable to the specified value. + * + * @param variableName the variable name + * @param variableValue the variable value + * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) + */ + public void setVariable(String variableName, String variableValue) { + variables.setVariable(variableName, variableValue); + } + + /** + * Shares a variable space from another variable space. This means that the object should take over the space used as + * argument. + * + * @param space the variable space + * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) + */ + public void shareVariablesWith(VariableSpace space) { + variables = space; + } + + /** + * Injects variables using the given Map. The behavior should be that the properties object will be stored and at the + * time the VariableSpace is initialized (or upon calling this method if the space is already initialized). After + * injecting the link of the properties object should be removed. + * + * @param prop the property map + * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) + */ + public void injectVariables(Map prop) { + variables.injectVariables(prop); + } + + /** + * Pauses the transformation (pause all steps). + */ + public void pauseRunning() { + paused.set(true); + for (StepMetaDataCombi combi : steps) { + combi.step.pauseRunning(); } + } - // Write to the standard transformation log table... - // - if ( !Const.isEmpty( logTable ) ) { - ldb.writeLogRecord( transLogTable, status, this, null ); + /** + * Resumes running the transformation after a pause (resume all steps). + */ + public void resumeRunning() { + for (StepMetaDataCombi combi : steps) { + combi.step.resumeRunning(); } + paused.set(false); + } + + /** + * Checks whether the transformation is being previewed. + * + * @return true if the transformation is being previewed, false otherwise + */ + public boolean isPreview() { + return preview; + } + + /** + * Sets whether the transformation is being previewed. + * + * @param preview true if the transformation is being previewed, false otherwise + */ + public void setPreview(boolean preview) { + this.preview = preview; + } + + /** + * Gets the repository object for the transformation. + * + * @return the repository + */ + public Repository getRepository() { + + if (repository == null) { + // Does the transmeta have a repo? + // This is a valid case, when a non-repo trans is attempting to retrieve + // a transformation in the repository. 
+ if (transMeta != null) { + return transMeta.getRepository(); + } + } + return repository; + } + + /** + * Sets the repository object for the transformation. + * + * @param repository the repository object to set + */ + public void setRepository(Repository repository) { + this.repository = repository; + if (transMeta != null) { + transMeta.setRepository(repository); + } + } - // Also time-out the log records in here... - // - if ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) { - ldb.cleanupLogRecords( transLogTable ); + /** + * Gets a named list (map) of step performance snapshots. + * + * @return a named list (map) of step performance snapshots + */ + public Map> getStepPerformanceSnapShots() { + return stepPerformanceSnapShots; + } + + /** + * Sets the named list (map) of step performance snapshots. + * + * @param stepPerformanceSnapShots a named list (map) of step performance snapshots to set + */ + public void setStepPerformanceSnapShots(Map> stepPerformanceSnapShots) { + this.stepPerformanceSnapShots = stepPerformanceSnapShots; + } + + /** + * Gets a list of the transformation listeners. Please do not attempt to modify this list externally. Returned list is + * mutable only for backward compatibility purposes. + * + * @return the transListeners + */ + public List getTransListeners() { + return transListeners; + } + + /** + * Sets the list of transformation listeners. + * + * @param transListeners the transListeners to set + */ + public void setTransListeners(List transListeners) { + this.transListeners = Collections.synchronizedList(transListeners); + } + + /** + * Adds a transformation listener. + * + * @param transListener the trans listener + */ + public void addTransListener(TransListener transListener) { + // PDI-5229 sync added + synchronized (transListeners) { + transListeners.add(transListener); } + } - // Commit the operations to prevent locking issues - // - if ( !ldb.isAutoCommit() ) { - ldb.commitLog( true, transMeta.getTransLogTable() ); - } - } catch ( KettleDatabaseException e ) { - // PDI-9790 error write to log db is transaction error - log.logError( BaseMessages.getString( PKG, "Database.Error.WriteLogTable", logTable ), e ); - errors.incrementAndGet(); - // end PDI-9790 - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", - transMeta.getTransLogTable().getActualTableName() ), e ); - } finally { - if ( intervalInSeconds <= 0 || ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) ) { - ldb.disconnect(); - transLogTableDatabaseConnection = null; // disconnected - } - } - } - return true; - } - - /** - * Write step performance log records. - * - * @param startSequenceNr - * the start sequence numberr - * @param status - * the logging status. If this is End, perform cleanup - * @return the new sequence number - * @throws KettleException - * if any errors occur during logging - */ - private int writeStepPerformanceLogRecords( int startSequenceNr, LogStatus status ) throws KettleException { - int lastSeqNr = 0; - Database ldb = null; - PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable(); - - if ( !performanceLogTable.isDefined() || !transMeta.isCapturingStepPerformanceSnapShots() - || stepPerformanceSnapShots == null || stepPerformanceSnapShots.isEmpty() ) { - return 0; // nothing to do here! 
- } - - try { - ldb = new Database( this, performanceLogTable.getDatabaseMeta() ); - ldb.shareVariablesWith( this ); - ldb.connect(); - ldb.setCommit( logCommitSize ); - - // Write to the step performance log table... - // - RowMetaInterface rowMeta = performanceLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); - ldb.prepareInsert( rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable.getActualTableName() ); - - synchronized ( stepPerformanceSnapShots ) { - Iterator> iterator = stepPerformanceSnapShots.values().iterator(); - while ( iterator.hasNext() ) { - List snapshots = iterator.next(); - synchronized ( snapshots ) { - Iterator snapshotsIterator = snapshots.iterator(); - while ( snapshotsIterator.hasNext() ) { - StepPerformanceSnapShot snapshot = snapshotsIterator.next(); - if ( snapshot.getSeqNr() >= startSequenceNr && snapshot - .getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded ) { - - RowMetaAndData row = performanceLogTable.getLogRecord( LogStatus.START, snapshot, null ); - - ldb.setValuesInsert( row.getRowMeta(), row.getData() ); - ldb.insertRow( true ); - } - lastSeqNr = snapshot.getSeqNr(); - } - } - } - } - - ldb.insertFinished( true ); - - // Finally, see if the log table needs cleaning up... - // - if ( status.equals( LogStatus.END ) ) { - ldb.cleanupLogRecords( performanceLogTable ); - } - - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, - "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable" ), e ); - } finally { - if ( ldb != null ) { - ldb.disconnect(); - } - } - - return lastSeqNr + 1; - } - - /** - * Close unique database connections. If there are errors in the Result, perform a rollback - * - * @param result - * the result of the transformation execution - */ - private void closeUniqueDatabaseConnections( Result result ) { - - // Don't close any connections if the parent job is using the same transaction - // - if ( parentJob != null && transactionId != null && parentJob.getTransactionId() != null && transactionId.equals( - parentJob.getTransactionId() ) ) { - return; - } - - // Don't close any connections if the parent transformation is using the same transaction - // - if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() && transactionId != null - && parentTrans.getTransactionId() != null && transactionId.equals( parentTrans.getTransactionId() ) ) { - return; - } - - // First we get all the database connections ... - // - DatabaseConnectionMap map = DatabaseConnectionMap.getInstance(); - synchronized ( map ) { - List databaseList = new ArrayList( map.getMap().values() ); - for ( Database database : databaseList ) { - if ( database.getConnectionGroup().equals( getTransactionId() ) ) { - try { - // This database connection belongs to this transformation. - // Let's roll it back if there is an error... 
- // - if ( result.getNrErrors() > 0 ) { - try { - database.rollback( true ); - log.logBasic( BaseMessages.getString( PKG, "Trans.Exception.TransactionsRolledBackOnConnection", - database.toString() ) ); - } catch ( Exception e ) { - throw new KettleDatabaseException( BaseMessages.getString( PKG, - "Trans.Exception.ErrorRollingBackUniqueConnection", database.toString() ), e ); - } - } else { - try { - database.commit( true ); - log.logBasic( BaseMessages.getString( PKG, "Trans.Exception.TransactionsCommittedOnConnection", database - .toString() ) ); - } catch ( Exception e ) { - throw new KettleDatabaseException( BaseMessages.getString( PKG, - "Trans.Exception.ErrorCommittingUniqueConnection", database.toString() ), e ); - } - } - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", - database.toString() ), e ); - result.setNrErrors( result.getNrErrors() + 1 ); - } finally { - try { - // This database connection belongs to this transformation. - database.closeConnectionOnly(); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", - database.toString() ), e ); - result.setNrErrors( result.getNrErrors() + 1 ); - } finally { - // Remove the database from the list... - // - map.removeConnection( database.getConnectionGroup(), database.getPartitionId(), database ); - } - } - } - } - - // Who else needs to be informed of the rollback or commit? - // - List transactionListeners = map.getTransactionListeners( getTransactionId() ); - if ( result.getNrErrors() > 0 ) { - for ( DatabaseTransactionListener listener : transactionListeners ) { - try { - listener.rollback(); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerRollback" ), - e ); - result.setNrErrors( result.getNrErrors() + 1 ); - } - } - } else { - for ( DatabaseTransactionListener listener : transactionListeners ) { - try { - listener.commit(); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerCommit" ), e ); - result.setNrErrors( result.getNrErrors() + 1 ); - } - } - } - - } - } - - /** - * Find the run thread for the step with the specified name. - * - * @param stepname - * the step name - * @return a StepInterface object corresponding to the run thread for the specified step - */ - public StepInterface findRunThread( String stepname ) { - if ( steps == null ) { - return null; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface step = sid.step; - if ( step.getStepname().equalsIgnoreCase( stepname ) ) { - return step; - } - } - return null; - } - - /** - * Find the base steps for the step with the specified name. 
- * - * @param stepname - * the step name - * @return the list of base steps for the specified step - */ - public List findBaseSteps( String stepname ) { - List baseSteps = new ArrayList(); - - if ( steps == null ) { - return baseSteps; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface stepInterface = sid.step; - if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) { - baseSteps.add( stepInterface ); - } - } - return baseSteps; - } - - /** - * Find the executing step copy for the step with the specified name and copy number - * - * @param stepname - * the step name - * @param copynr - * @return the executing step found or null if no copy could be found. - */ - public StepInterface findStepInterface( String stepname, int copyNr ) { - if ( steps == null ) { - return null; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface stepInterface = sid.step; - if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) && sid.copy == copyNr ) { - return stepInterface; - } - } - return null; - } - - /** - * Find the available executing step copies for the step with the specified name - * - * @param stepname - * the step name - * @param copynr - * @return the list of executing step copies found or null if no steps are available yet (incorrect usage) - */ - public List findStepInterfaces( String stepname ) { - if ( steps == null ) { - return null; - } - - List list = new ArrayList(); - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface stepInterface = sid.step; - if ( stepInterface.getStepname().equalsIgnoreCase( stepname ) ) { - list.add( stepInterface ); - } - } - return list; - } - - /** - * Find the data interface for the step with the specified name. - * - * @param name - * the step name - * @return the step data interface - */ - public StepDataInterface findDataInterface( String name ) { - if ( steps == null ) { - return null; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - StepInterface rt = sid.step; - if ( rt.getStepname().equalsIgnoreCase( name ) ) { - return sid.data; - } - } - return null; - } - - /** - * Gets the start date/time object for the transformation. - * - * @return Returns the startDate. - */ - public Date getStartDate() { - return startDate; - } - - /** - * Gets the end date/time object for the transformation. - * - * @return Returns the endDate. - */ - public Date getEndDate() { - return endDate; - } - - /** - * Checks whether the running transformation is being monitored. - * - * @return true the running transformation is being monitored, false otherwise - */ - public boolean isMonitored() { - return monitored; - } - - /** - * Sets whether the running transformation should be monitored. - * - * @param monitored - * true if the running transformation should be monitored, false otherwise - */ - public void setMonitored( boolean monitored ) { - this.monitored = monitored; - } - - /** - * Gets the meta-data for the transformation. - * - * @return Returns the transformation meta-data - */ - public TransMeta getTransMeta() { - return transMeta; - } - - /** - * Sets the meta-data for the transformation. - * - * @param transMeta - * The transformation meta-data to set. - */ - public void setTransMeta( TransMeta transMeta ) { - this.transMeta = transMeta; - } - - /** - * Gets the current date/time object. 
- * - * @return the current date - */ - public Date getCurrentDate() { - return currentDate; - } - - /** - * Gets the dependency date for the transformation. A transformation can have a list of dependency fields. If any of - * these fields have a maximum date higher than the dependency date of the last run, the date range is set to to (-oo, - * now). The use-case is the incremental population of Slowly Changing Dimensions (SCD). - * - * @return Returns the dependency date - */ - public Date getDepDate() { - return depDate; - } - - /** - * Gets the date the transformation was logged. - * - * @return the log date - */ - public Date getLogDate() { - return logDate; - } - - /** - * Gets the rowsets for the transformation. - * - * @return a list of rowsets - */ - public List getRowsets() { - return rowsets; - } - - /** - * Gets a list of steps in the transformation. - * - * @return a list of the steps in the transformation - */ - public List getSteps() { - return steps; - } - - /** - * Gets a string representation of the transformation. - * - * @return the string representation of the transformation - * @see java.lang.Object#toString() - */ - public String toString() { - if ( transMeta == null || transMeta.getName() == null ) { - return getClass().getSimpleName(); - } - - // See if there is a parent transformation. If so, print the name of the parent here as well... - // - StringBuffer string = new StringBuffer(); - - // If we're running as a mapping, we get a reference to the calling (parent) transformation as well... - // - if ( getParentTrans() != null ) { - string.append( '[' ).append( getParentTrans().toString() ).append( ']' ).append( '.' ); - } - - // When we run a mapping we also set a mapping step name in there... - // - if ( !Const.isEmpty( mappingStepName ) ) { - string.append( '[' ).append( mappingStepName ).append( ']' ).append( '.' ); - } - - string.append( transMeta.getName() ); - - return string.toString(); - } - - /** - * Gets the mapping inputs for each step in the transformation. - * - * @return an array of MappingInputs - */ - public MappingInput[] findMappingInput() { - if ( steps == null ) { - return null; - } - - List list = new ArrayList(); - - // Look in threads and find the MappingInput step thread... - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi smdc = steps.get( i ); - StepInterface step = smdc.step; - if ( step.getStepID().equalsIgnoreCase( "MappingInput" ) ) { - list.add( (MappingInput) step ); - } - } - return list.toArray( new MappingInput[list.size()] ); - } - - /** - * Gets the mapping outputs for each step in the transformation. - * - * @return an array of MappingOutputs - */ - public MappingOutput[] findMappingOutput() { - List list = new ArrayList(); - - if ( steps != null ) { - // Look in threads and find the MappingInput step thread... - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi smdc = steps.get( i ); - StepInterface step = smdc.step; - if ( step.getStepID().equalsIgnoreCase( "MappingOutput" ) ) { - list.add( (MappingOutput) step ); - } - } - } - return list.toArray( new MappingOutput[list.size()] ); - } - - /** - * Find the StepInterface (thread) by looking it up using the name. - * - * @param stepname - * The name of the step to look for - * @param copy - * the copy number of the step to look for - * @return the StepInterface or null if nothing was found. - */ - public StepInterface getStepInterface( String stepname, int copy ) { - if ( steps == null ) { - return null; - } - - // Now start all the threads... 
- for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - if ( sid.stepname.equalsIgnoreCase( stepname ) && sid.copy == copy ) { - return sid.step; - } - } - - return null; - } - - /** - * Gets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run - * again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line - * numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are - * passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors - * (bad dates, chars in numbers, etc), you simply send the document back to the source (the user/departement that - * created it probably) and when you get it back, re-run the last transformation. - * - * @return the replay date - */ - public Date getReplayDate() { - return replayDate; - } - - /** - * Sets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run - * again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line - * numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are - * passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors - * (bad dates, chars in numbers, etc), you simply send the document back to the source (the user/departement that - * created it probably) and when you get it back, re-run the last transformation. - * - * @param replayDate - * the new replay date - */ - public void setReplayDate( Date replayDate ) { - this.replayDate = replayDate; - } - - /** - * Turn on safe mode during running: the transformation will run slower but with more checking enabled. - * - * @param safeModeEnabled - * true for safe mode - */ - public void setSafeModeEnabled( boolean safeModeEnabled ) { - this.safeModeEnabled = safeModeEnabled; - } - - /** - * Checks whether safe mode is enabled. - * - * @return Returns true if the safe mode is enabled: the transformation will run slower but with more checking enabled - */ - public boolean isSafeModeEnabled() { - return safeModeEnabled; - } - - /** - * This adds a row producer to the transformation that just got set up. It is preferable to run this BEFORE execute() - * but after prepareExecution() - * - * @param stepname - * The step to produce rows for - * @param copynr - * The copynr of the step to produce row for (normally 0 unless you have multiple copies running) - * @return the row producer - * @throws KettleException - * in case the thread/step to produce rows for could not be found. - * @see Trans#execute(String[]) - * @see Trans#prepareExecution(String[]) - */ - public RowProducer addRowProducer( String stepname, int copynr ) throws KettleException { - StepInterface stepInterface = getStepInterface( stepname, copynr ); - if ( stepInterface == null ) { - throw new KettleException( "Unable to find thread with name " + stepname + " and copy number " + copynr ); - } - - // We are going to add an extra RowSet to this stepInterface. 
- RowSet rowSet; - switch ( transMeta.getTransformationType() ) { - case Normal: - rowSet = new BlockingRowSet( transMeta.getSizeRowset() ); - break; - case SerialSingleThreaded: - rowSet = new SingleRowRowSet(); - break; - case SingleThreaded: - rowSet = new QueueRowSet(); - break; - default: - throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() ); - } - - // Add this rowset to the list of active rowsets for the selected step - stepInterface.getInputRowSets().add( rowSet ); - - return new RowProducer( stepInterface, rowSet ); - } - - /** - * Gets the parent job, or null if there is no parent. - * - * @return the parent job, or null if there is no parent - */ - public Job getParentJob() { - return parentJob; - } - - /** - * Sets the parent job for the transformation. - * - * @param parentJob - * The parent job to set - */ - public void setParentJob( Job parentJob ) { - this.logLevel = parentJob.getLogLevel(); - this.log.setLogLevel( logLevel ); - this.parentJob = parentJob; - - transactionId = calculateTransactionId(); - } - - /** - * Finds the StepDataInterface (currently) associated with the specified step. - * - * @param stepname - * The name of the step to look for - * @param stepcopy - * The copy number (0 based) of the step - * @return The StepDataInterface or null if non found. - */ - public StepDataInterface getStepDataInterface( String stepname, int stepcopy ) { - if ( steps == null ) { - return null; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - if ( sid.stepname.equals( stepname ) && sid.copy == stepcopy ) { - return sid.data; - } - } - return null; - } - - /** - * Checks whether the transformation has any steps that are halted. - * - * @return true if one or more steps are halted, false otherwise - */ - public boolean hasHaltedSteps() { - // not yet 100% sure of this, if there are no steps... or none halted? - if ( steps == null ) { - return false; - } - - for ( int i = 0; i < steps.size(); i++ ) { - StepMetaDataCombi sid = steps.get( i ); - if ( sid.data.getStatus() == StepExecutionStatus.STATUS_HALTED ) { - return true; - } - } - return false; - } - - /** - * Gets the job start date. - * - * @return the job start date - */ - public Date getJobStartDate() { - return jobStartDate; - } - - /** - * Gets the job end date. - * - * @return the job end date - */ - public Date getJobEndDate() { - return jobEndDate; - } - - /** - * Sets the job end date. - * - * @param jobEndDate - * the jobEndDate to set - */ - public void setJobEndDate( Date jobEndDate ) { - this.jobEndDate = jobEndDate; - } - - /** - * Sets the job start date. - * - * @param jobStartDate - * the jobStartDate to set - */ - public void setJobStartDate( Date jobStartDate ) { - this.jobStartDate = jobStartDate; - } - - /** - * Get the batch ID that is passed from the parent job to the transformation. If nothing is passed, it's the - * transformation's batch ID - * - * @return the parent job's batch ID, or the transformation's batch ID if there is no parent job - */ - public long getPassedBatchId() { - return passedBatchId; - } - - /** - * Sets the passed batch ID of the transformation from the batch ID of the parent job. - * - * @param jobBatchId - * the jobBatchId to set - */ - public void setPassedBatchId( long jobBatchId ) { - this.passedBatchId = jobBatchId; - } - - /** - * Gets the batch ID of the transformation. 
- * - * @return the batch ID of the transformation - */ - public long getBatchId() { - return batchId; - } - - /** - * Sets the batch ID of the transformation. - * - * @param batchId - * the batch ID to set - */ - public void setBatchId( long batchId ) { - this.batchId = batchId; - } - - /** - * Gets the name of the thread that contains the transformation. - * - * @deprecated please use getTransactionId() instead - * @return the thread name - */ - @Deprecated - public String getThreadName() { - return threadName; - } - - /** - * Sets the thread name for the transformation. - * - * @deprecated please use setTransactionId() instead - * @param threadName - * the thread name - */ - @Deprecated - public void setThreadName( String threadName ) { - this.threadName = threadName; - } - - /** - * Gets the status of the transformation (Halting, Finished, Paused, etc.) - * - * @return the status of the transformation - */ - public String getStatus() { - String message; - - if ( running ) { - if ( isStopped() ) { - message = STRING_HALTING; - } else { - if ( isFinished() ) { - message = STRING_FINISHED; - if ( getResult().getNrErrors() > 0 ) { - message += " (with errors)"; - } - } else if ( isPaused() ) { - message = STRING_PAUSED; - } else { - message = STRING_RUNNING; - } - } - } else if ( isStopped() ) { - message = STRING_STOPPED; - } else if ( preparing ) { - message = STRING_PREPARING; - } else if ( initializing ) { - message = STRING_INITIALIZING; - } else { - message = STRING_WAITING; - } - - return message; - } - - /** - * Checks whether the transformation is initializing. - * - * @return true if the transformation is initializing, false otherwise - */ - public boolean isInitializing() { - return initializing; - } - - /** - * Sets whether the transformation is initializing. - * - * @param initializing - * true if the transformation is initializing, false otherwise - */ - public void setInitializing( boolean initializing ) { - this.initializing = initializing; - } - - /** - * Checks whether the transformation is preparing for execution. - * - * @return true if the transformation is preparing for execution, false otherwise - */ - public boolean isPreparing() { - return preparing; - } - - /** - * Sets whether the transformation is preparing for execution. - * - * @param preparing - * true if the transformation is preparing for execution, false otherwise - */ - public void setPreparing( boolean preparing ) { - this.preparing = preparing; - } - - /** - * Checks whether the transformation is running. - * - * @return true if the transformation is running, false otherwise - */ - public boolean isRunning() { - return running; - } - - /** - * Sets whether the transformation is running. - * - * @param running - * true if the transformation is running, false otherwise - */ - public void setRunning( boolean running ) { - this.running = running; - } - - /** - * Execute the transformation in a clustered fashion. 
The transformation steps are split and collected in a - * TransSplitter object - * - * @param transMeta - * the transformation's meta-data - * @param executionConfiguration - * the execution configuration - * @return the transformation splitter object - * @throws KettleException - * the kettle exception - */ - public static final TransSplitter executeClustered( final TransMeta transMeta, - final TransExecutionConfiguration executionConfiguration ) throws KettleException { - if ( Const.isEmpty( transMeta.getName() ) ) { - throw new KettleException( "The transformation needs a name to uniquely identify it by on the remote server." ); - } - - TransSplitter transSplitter = new TransSplitter( transMeta ); - transSplitter.splitOriginalTransformation(); - - // Pass the clustered run ID to allow for parallel execution of clustered transformations - // - executionConfiguration.getVariables().put( Const.INTERNAL_VARIABLE_CLUSTER_RUN_ID, transSplitter - .getClusteredRunId() ); - - executeClustered( transSplitter, executionConfiguration ); - return transSplitter; - } - - /** - * Executes an existing TransSplitter, with the transformation already split. - * - * @param transSplitter - * the trans splitter - * @param executionConfiguration - * the execution configuration - * @throws KettleException - * the kettle exception - * @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate - */ - public static final void executeClustered( final TransSplitter transSplitter, - final TransExecutionConfiguration executionConfiguration ) throws KettleException { - try { - // Send the transformations to the servers... - // - // First the master and the slaves... - // - TransMeta master = transSplitter.getMaster(); - final SlaveServer[] slaves = transSplitter.getSlaveTargets(); - final Thread[] threads = new Thread[slaves.length]; - final Throwable[] errors = new Throwable[slaves.length]; - - // Keep track of the various Carte object IDs - // - final Map carteObjectMap = transSplitter.getCarteObjectMap(); - - // - // Send them all on their way... - // - SlaveServer masterServer = null; - List masterSteps = master.getTransHopSteps( false ); - if ( masterSteps.size() > 0 ) { // If there is something that needs to be done on the master... - masterServer = transSplitter.getMasterServer(); - if ( executionConfiguration.isClusterPosting() ) { - TransConfiguration transConfiguration = new TransConfiguration( master, executionConfiguration ); - Map variables = transConfiguration.getTransExecutionConfiguration().getVariables(); - variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) ); - variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y" ); - - // Parameters override the variables but they need to pass over the configuration too... 
- // - Map params = transConfiguration.getTransExecutionConfiguration().getParams(); - TransMeta ot = transSplitter.getOriginalTransformation(); - for ( String param : ot.listParameters() ) { - String value = - Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot.getVariable( - param ) ) ); - params.put( param, value ); - } - - String masterReply = - masterServer.sendXML( transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y" ); - WebResult webResult = WebResult.fromXMLString( masterReply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "An error occurred sending the master transformation: " + webResult - .getMessage() ); - } - carteObjectMap.put( master, webResult.getId() ); - } - } - - // Then the slaves... - // These are started in a background thread. - // - for ( int i = 0; i < slaves.length; i++ ) { - final int index = i; - - final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] ); - - if ( executionConfiguration.isClusterPosting() ) { - Runnable runnable = new Runnable() { - public void run() { - try { - // Create a copy for local use... We get race-conditions otherwise... - // - TransExecutionConfiguration slaveTransExecutionConfiguration = - (TransExecutionConfiguration) executionConfiguration.clone(); - TransConfiguration transConfiguration = - new TransConfiguration( slaveTrans, slaveTransExecutionConfiguration ); - - Map variables = slaveTransExecutionConfiguration.getVariables(); - variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString( index ) ); - variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName() ); - variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) ); - variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N" ); - - // Parameters override the variables but they need to pass over the configuration too... - // - Map params = slaveTransExecutionConfiguration.getParams(); - TransMeta ot = transSplitter.getOriginalTransformation(); - for ( String param : ot.listParameters() ) { - String value = - Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot - .getVariable( param ) ) ); - params.put( param, value ); + /** + * Sets the list of stop-event listeners for the transformation. + * + * @param transStoppedListeners the list of stop-event listeners to set + */ + public void setTransStoppedListeners(List transStoppedListeners) { + this.transStoppedListeners = Collections.synchronizedList(transStoppedListeners); + } + + /** + * Gets the list of stop-event listeners for the transformation. This is not concurrent safe. Please note this is + * mutable implementation only for backward compatibility reasons. + * + * @return the list of stop-event listeners + */ + public List getTransStoppedListeners() { + return transStoppedListeners; + } + + /** + * Adds a stop-event listener to the transformation. + * + * @param transStoppedListener the stop-event listener to add + */ + public void addTransStoppedListener(TransStoppedListener transStoppedListener) { + transStoppedListeners.add(transStoppedListener); + } + + /** + * Checks if the transformation is paused. + * + * @return true if the transformation is paused, false otherwise + */ + public boolean isPaused() { + return paused.get(); + } + + /** + * Checks if the transformation is stopped. 
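
An illustrative sketch (not taken from the Kettle sources) of how a caller can use the stop-event API documented above; it assumes an existing Trans instance named trans, and the listener body is hypothetical.

    // Register a stop-event listener and inspect the pause/stop flags.
    trans.addTransStoppedListener(new TransStoppedListener() {
        public void transStopped(Trans trans) {
            trans.getLogChannel().logBasic("Transformation received a stop request.");
        }
    });
    if (!trans.isStopped() && !trans.isPaused()) {
        // still running normally
    }
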
+ * + * @return true if the transformation is stopped, false otherwise + */ + public boolean isStopped() { + return stopped.get(); + } + + /** + * Monitors a remote transformation every 5 seconds. + * + * @param log the log channel interface + * @param carteObjectId the Carte object ID + * @param transName the transformation name + * @param remoteSlaveServer the remote slave server + */ + public static void monitorRemoteTransformation(LogChannelInterface log, String carteObjectId, String transName, + SlaveServer remoteSlaveServer) { + monitorRemoteTransformation(log, carteObjectId, transName, remoteSlaveServer, 5); + } + + /** + * Monitors a remote transformation at the specified interval. + * + * @param log the log channel interface + * @param carteObjectId the Carte object ID + * @param transName the transformation name + * @param remoteSlaveServer the remote slave server + * @param sleepTimeSeconds the sleep time (in seconds) + */ + public static void monitorRemoteTransformation(LogChannelInterface log, String carteObjectId, String transName, + SlaveServer remoteSlaveServer, int sleepTimeSeconds) { + long errors = 0; + boolean allFinished = false; + while (!allFinished && errors == 0) { + allFinished = true; + errors = 0L; + + // Check the remote server + if (allFinished && errors == 0) { + try { + SlaveServerTransStatus transStatus = remoteSlaveServer.getTransStatus(transName, carteObjectId, 0); + if (transStatus.isRunning()) { + if (log.isDetailed()) { + log.logDetailed(transName, "Remote transformation is still running."); + } + allFinished = false; + } else { + if (log.isDetailed()) { + log.logDetailed(transName, "Remote transformation has finished."); + } + } + Result result = transStatus.getResult(); + errors += result.getNrErrors(); + } catch (Exception e) { + errors += 1; + log.logError(transName, "Unable to contact remote slave server '" + remoteSlaveServer.getName() + + "' to check transformation status : " + e.toString()); } + } - String slaveReply = - slaves[index].sendXML( transConfiguration.getXML(), RegisterTransServlet.CONTEXT_PATH + "/?xml=Y" ); - WebResult webResult = WebResult.fromXMLString( slaveReply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "An error occurred sending a slave transformation: " + webResult - .getMessage() ); + // + // Keep waiting until all transformations have finished + // If needed, we stop them again and again until they yield. + // + if (!allFinished) { + // Not finished or error: wait a bit longer + if (log.isDetailed()) { + log.logDetailed(transName, "The remote transformation is still running, waiting a few seconds..."); } - carteObjectMap.put( slaveTrans, webResult.getId() ); - } catch ( Throwable t ) { - errors[index] = t; - } - } - }; - threads[i] = new Thread( runnable ); - } - } - - // Start the slaves - for ( int i = 0; i < threads.length; i++ ) { - if ( threads[i] != null ) { - threads[i].start(); - } - } - - // Wait until the slaves report back... - // Sending the XML over is the heaviest part - // Later we can do the others as well... - // - for ( int i = 0; i < threads.length; i++ ) { - if ( threads[i] != null ) { - threads[i].join(); - if ( errors[i] != null ) { - throw new KettleException( errors[i] ); - } - } - } - - if ( executionConfiguration.isClusterPosting() ) { - if ( executionConfiguration.isClusterPreparing() ) { - // Prepare the master... - if ( masterSteps.size() > 0 ) { // If there is something that needs to be done on the master... 
- String carteObjectId = carteObjectMap.get( master ); - String masterReply = - masterServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( - master.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); - WebResult webResult = WebResult.fromXMLString( masterReply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( - "An error occurred while preparing the execution of the master transformation: " + webResult - .getMessage() ); - } - } - - // Prepare the slaves - // WG: Should these be threaded like the above initialization? - for ( int i = 0; i < slaves.length; i++ ) { - TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] ); - String carteObjectId = carteObjectMap.get( slaveTrans ); - String slaveReply = - slaves[i].execService( PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( - slaveTrans.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); - WebResult webResult = WebResult.fromXMLString( slaveReply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "An error occurred while preparing the execution of a slave transformation: " - + webResult.getMessage() ); - } - } - } - - if ( executionConfiguration.isClusterStarting() ) { - // Start the master... - if ( masterSteps.size() > 0 ) { // If there is something that needs to be done on the master... - String carteObjectId = carteObjectMap.get( master ); - String masterReply = - masterServer.execService( StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( - master.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); - WebResult webResult = WebResult.fromXMLString( masterReply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "An error occurred while starting the execution of the master transformation: " - + webResult.getMessage() ); - } - } - - // Start the slaves - // WG: Should these be threaded like the above initialization? - for ( int i = 0; i < slaves.length; i++ ) { - TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] ); - String carteObjectId = carteObjectMap.get( slaveTrans ); - String slaveReply = - slaves[i].execService( StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( - slaveTrans.getName(), "UTF-8" ) + "&id=" + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" ); - WebResult webResult = WebResult.fromXMLString( slaveReply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "An error occurred while starting the execution of a slave transformation: " - + webResult.getMessage() ); - } - } - } - } - } catch ( KettleException ke ) { - throw ke; - } catch ( Exception e ) { - throw new KettleException( "There was an error during transformation split", e ); - } - } - - /** - * Monitors a clustered transformation every second, after all the transformations in a cluster schema are running. - *
- * Now we should verify that they are all running as they should.
- * If a transformation has an error, we should kill them all.
- * This should happen in a separate thread to prevent blocking of the UI.
- *
- * When the master and slave transformations have all finished, we should also run
- * a cleanup on those transformations to release sockets, etc.
- *
- * - * @param log - * the log interface channel - * @param transSplitter - * the transformation splitter object - * @param parentJob - * the parent job when executed in a job, otherwise just set to null - * @return the number of errors encountered - */ - public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter, - Job parentJob ) { - return monitorClusteredTransformation( log, transSplitter, parentJob, 1 ); // monitor every 1 seconds - } - - /** - * Monitors a clustered transformation every second, after all the transformations in a cluster schema are running. - *
- * Now we should verify that they are all running as they should.
- * If a transformation has an error, we should kill them all.
- * This should happen in a separate thread to prevent blocking of the UI.
- *
- * When the master and slave transformations have all finished, we should also run
- * a cleanup on those transformations to release sockets, etc.
- *
- * - * @param log - * the subject to use for logging - * @param transSplitter - * the transformation splitter object - * @param parentJob - * the parent job when executed in a job, otherwise just set to null - * @param sleepTimeSeconds - * the sleep time in seconds in between slave transformation status polling - * @return the number of errors encountered - */ - public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter, - Job parentJob, int sleepTimeSeconds ) { - long errors = 0L; - - // - // See if the remote transformations have finished. - // We could just look at the master, but I doubt that that is enough in all - // situations. - // - SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask - // these guys - TransMeta[] slaves = transSplitter.getSlaves(); - Map carteObjectMap = transSplitter.getCarteObjectMap(); - - SlaveServer masterServer; - try { - masterServer = transSplitter.getMasterServer(); - } catch ( KettleException e ) { - log.logError( "Error getting the master server", e ); - masterServer = null; - errors++; - } - TransMeta masterTransMeta = transSplitter.getMaster(); - - boolean allFinished = false; - while ( !allFinished && errors == 0 && ( parentJob == null || !parentJob.isStopped() ) ) { - allFinished = true; - errors = 0L; - - // Slaves first... - // - for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) { - try { - String carteObjectId = carteObjectMap.get( slaves[s] ); - SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), carteObjectId, 0 ); - if ( transStatus.isRunning() ) { - if ( log.isDetailed() ) { - log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' is still running." ); - } - allFinished = false; - } else { - if ( log.isDetailed() ) { - log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' has finished." ); - } - } - errors += transStatus.getNrStepErrors(); - } catch ( Exception e ) { - errors += 1; - log.logError( "Unable to contact slave server '" + slaveServers[s].getName() - + "' to check slave transformation : " + e.toString() ); - } - } - - // Check the master too - if ( allFinished && errors == 0 && masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) { - try { - String carteObjectId = carteObjectMap.get( masterTransMeta ); - SlaveServerTransStatus transStatus = - masterServer.getTransStatus( masterTransMeta.getName(), carteObjectId, 0 ); - if ( transStatus.isRunning() ) { - if ( log.isDetailed() ) { - log.logDetailed( "Master transformation is still running." ); - } - allFinished = false; - } else { - if ( log.isDetailed() ) { - log.logDetailed( "Master transformation has finished." ); - } - } - Result result = transStatus.getResult( transSplitter.getOriginalTransformation() ); - errors += result.getNrErrors(); - } catch ( Exception e ) { - errors += 1; - log.logError( "Unable to contact master server '" + masterServer.getName() - + "' to check master transformation : " + e.toString() ); - } - } - - if ( ( parentJob != null && parentJob.isStopped() ) || errors != 0 ) { - // - // Stop all slaves and the master on the slave servers + try { + Thread.sleep(sleepTimeSeconds * 1000); + } catch (Exception e) { + // Ignore errors + } // Check all slaves every x seconds. 
+ } + } + + log.logMinimal(transName, "The remote transformation has finished."); + + // Clean up the remote transformation // - for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) { - try { - String carteObjectId = carteObjectMap.get( slaves[s] ); - WebResult webResult = slaveServers[s].stopTransformation( slaves[s].getName(), carteObjectId ); - if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { - log.logError( "Unable to stop slave transformation '" + slaves[s].getName() + "' : " + webResult - .getMessage() ); - } - } catch ( Exception e ) { + try { + WebResult webResult = remoteSlaveServer.cleanupTransformation(transName, carteObjectId); + if (!WebResult.STRING_OK.equals(webResult.getResult())) { + log.logError(transName, "Unable to run clean-up on remote transformation '" + transName + "' : " + webResult + .getMessage()); + errors += 1; + } + } catch (Exception e) { errors += 1; - log.logError( "Unable to contact slave server '" + slaveServers[s].getName() + "' to stop transformation : " - + e.toString() ); - } + log.logError(transName, "Unable to contact slave server '" + remoteSlaveServer.getName() + + "' to clean up transformation : " + e.toString()); } + } - try { - String carteObjectId = carteObjectMap.get( masterTransMeta ); - WebResult webResult = masterServer.stopTransformation( masterTransMeta.getName(), carteObjectId ); - if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { - log.logError( "Unable to stop master transformation '" + masterServer.getName() + "' : " + webResult - .getMessage() ); - } - } catch ( Exception e ) { - errors += 1; - log.logError( "Unable to contact master server '" + masterServer.getName() + "' to stop the master : " + e - .toString() ); - } - } - - // - // Keep waiting until all transformations have finished - // If needed, we stop them again and again until they yield. - // - if ( !allFinished ) { - // Not finished or error: wait a bit longer - if ( log.isDetailed() ) { - log.logDetailed( "Clustered transformation is still running, waiting a few seconds..." ); + /** + * Adds a parameter definition to this transformation. + * + * @param key the name of the parameter + * @param defValue the default value for the parameter + * @param description the description of the parameter + * @throws DuplicateParamException the duplicate param exception + * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, + * java.lang.String) + */ + public void addParameterDefinition(String key, String defValue, String description) throws DuplicateParamException { + namedParams.addParameterDefinition(key, defValue, description); + } + + /** + * Gets the default value of the specified parameter. + * + * @param key the name of the parameter + * @return the default value of the parameter + * @throws UnknownParamException if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) + */ + public String getParameterDefault(String key) throws UnknownParamException { + return namedParams.getParameterDefault(key); + } + + /** + * Gets the description of the specified parameter. 
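
A minimal usage sketch of the remote-execution helpers handled here (sendToSlaveServer is defined further down in this class); transMeta, repository, metaStore, slaveServer and log are assumed to be prepared by the caller, and error handling is omitted.

    // Post a transformation to a Carte slave and poll it with the helper above.
    TransExecutionConfiguration config = new TransExecutionConfiguration();
    config.setRemoteServer(slaveServer);
    // sendToSlaveServer throws KettleException; handle or propagate it in real code.
    String carteObjectId = Trans.sendToSlaveServer(transMeta, config, repository, metaStore);
    // Checks the slave every 5 seconds and cleans up the remote transformation once it is done.
    Trans.monitorRemoteTransformation(log, carteObjectId, transMeta.getName(), slaveServer);
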
+ * + * @param key the name of the parameter + * @return the parameter description + * @throws UnknownParamException if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) + */ + public String getParameterDescription(String key) throws UnknownParamException { + return namedParams.getParameterDescription(key); + } + + /** + * Gets the value of the specified parameter. + * + * @param key the name of the parameter + * @return the parameter value + * @throws UnknownParamException if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) + */ + public String getParameterValue(String key) throws UnknownParamException { + return namedParams.getParameterValue(key); + } + + /** + * Gets a list of the parameters for the transformation. + * + * @return an array of strings containing the names of all parameters for the transformation + * @see org.pentaho.di.core.parameters.NamedParams#listParameters() + */ + public String[] listParameters() { + return namedParams.listParameters(); + } + + /** + * Sets the value for the specified parameter. + * + * @param key the name of the parameter + * @param value the name of the value + * @throws UnknownParamException if the parameter does not exist + * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) + */ + public void setParameterValue(String key, String value) throws UnknownParamException { + namedParams.setParameterValue(key, value); + } + + /** + * Remove all parameters. + * + * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() + */ + public void eraseParameters() { + namedParams.eraseParameters(); + } + + /** + * Clear the values of all parameters. + * + * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() + */ + public void clearParameters() { + namedParams.clearParameters(); + } + + /** + * Activates all parameters by setting their values. If no values already exist, the method will attempt to set the + * parameter to the default value. If no default value exists, the method will set the value of the parameter to the + * empty string (""). + * + * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() + */ + public void activateParameters() { + String[] keys = listParameters(); + + for (String key : keys) { + String value; + try { + value = getParameterValue(key); + } catch (UnknownParamException e) { + value = ""; + } + + String defValue; + try { + defValue = getParameterDefault(key); + } catch (UnknownParamException e) { + defValue = ""; + } + + if (Const.isEmpty(value)) { + setVariable(key, Const.NVL(defValue, "")); + } else { + setVariable(key, Const.NVL(value, "")); + } + } + } + + /** + * Copy parameters from a NamedParams object. + * + * @param params the NamedParams object from which to copy the parameters + * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) + */ + public void copyParametersFrom(NamedParams params) { + namedParams.copyParametersFrom(params); + } + + /** + * Gets the parent transformation, which is null if no parent transformation exists. + * + * @return a reference to the parent transformation's Trans object, or null if no parent transformation exists + */ + public Trans getParentTrans() { + return parentTrans; + } + + /** + * Sets the parent transformation. 
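
A short sketch of the NamedParams delegation documented above, assuming an existing Trans instance; the parameter name and values are hypothetical.

    try {
        trans.addParameterDefinition("INPUT_DIR", "/tmp", "Directory to read input files from");
        trans.setParameterValue("INPUT_DIR", "/data/incoming");
    } catch (DuplicateParamException e) {
        // the parameter was already defined
    } catch (UnknownParamException e) {
        // the parameter name is not known
    }
    // Activation copies parameter values (or their defaults) into the variable space.
    trans.activateParameters();
    String inputDir = trans.getVariable("INPUT_DIR");
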
+ * + * @param parentTrans the parentTrans to set + */ + public void setParentTrans(Trans parentTrans) { + this.logLevel = parentTrans.getLogLevel(); + this.log.setLogLevel(logLevel); + this.parentTrans = parentTrans; + + transactionId = calculateTransactionId(); + } + + /** + * Gets the mapping step name. + * + * @return the name of the mapping step that created this transformation + */ + public String getMappingStepName() { + return mappingStepName; + } + + /** + * Sets the mapping step name. + * + * @param mappingStepName the name of the mapping step that created this transformation + */ + public void setMappingStepName(String mappingStepName) { + this.mappingStepName = mappingStepName; + } + + /** + * Sets the socket repository. + * + * @param socketRepository the new socket repository + */ + public void setSocketRepository(SocketRepository socketRepository) { + this.socketRepository = socketRepository; + } + + /** + * Gets the socket repository. + * + * @return the socket repository + */ + public SocketRepository getSocketRepository() { + return socketRepository; + } + + /** + * Gets the object name. + * + * @return the object name + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectName() + */ + public String getObjectName() { + return getName(); + } + + /** + * Gets the object copy. For Trans, this always returns null + * + * @return null + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectCopy() + */ + public String getObjectCopy() { + return null; + } + + /** + * Gets the filename of the transformation, or null if no filename exists + * + * @return the filename + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getFilename() + */ + public String getFilename() { + if (transMeta == null) { + return null; + } + return transMeta.getFilename(); + } + + /** + * Gets the log channel ID. + * + * @return the log channel ID + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() + */ + public String getLogChannelId() { + return log.getLogChannelId(); + } + + /** + * Gets the object ID. + * + * @return the object ID + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectId() + */ + public ObjectId getObjectId() { + if (transMeta == null) { + return null; + } + return transMeta.getObjectId(); + } + + /** + * Gets the object revision. + * + * @return the object revision + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectRevision() + */ + public ObjectRevision getObjectRevision() { + if (transMeta == null) { + return null; + } + return transMeta.getObjectRevision(); + } + + /** + * Gets the object type. For Trans, this always returns LoggingObjectType.TRANS + * + * @return the object type + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.TRANS; + } + + /** + * Gets the parent logging object interface. + * + * @return the parent + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getParent() + */ + public LoggingObjectInterface getParent() { + return parent; + } + + /** + * Gets the repository directory. + * + * @return the repository directory + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getRepositoryDirectory() + */ + public RepositoryDirectoryInterface getRepositoryDirectory() { + if (transMeta == null) { + return null; + } + return transMeta.getRepositoryDirectory(); + } + + /** + * Gets the log level. 
+ * + * @return the log level + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogLevel() + */ + public LogLevel getLogLevel() { + return logLevel; + } + + /** + * Sets the log level. + * + * @param logLevel the new log level + */ + public void setLogLevel(LogLevel logLevel) { + this.logLevel = logLevel; + log.setLogLevel(logLevel); + } + + /** + * Gets the logging hierarchy. + * + * @return the logging hierarchy + */ + public List getLoggingHierarchy() { + List hierarchy = new ArrayList(); + List childIds = LoggingRegistry.getInstance().getLogChannelChildren(getLogChannelId()); + for (String childId : childIds) { + LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject(childId); + if (loggingObject != null) { + hierarchy.add(new LoggingHierarchy(getLogChannelId(), batchId, loggingObject)); + } } - try { - Thread.sleep( sleepTimeSeconds * 2000 ); - } catch ( Exception e ) { - // Ignore errors - } // Check all slaves every x seconds. - } - } - - log.logBasic( "All transformations in the cluster have finished." ); - - errors += cleanupCluster( log, transSplitter ); - - return errors; - } - - /** - * Cleanup the cluster, including the master and all slaves, and return the number of errors that occurred. - * - * @param log - * the log channel interface - * @param transSplitter - * the TransSplitter object - * @return the number of errors that occurred in the clustered transformation - */ - public static int cleanupCluster( LogChannelInterface log, TransSplitter transSplitter ) { - - SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); - TransMeta[] slaves = transSplitter.getSlaves(); - SlaveServer masterServer; - try { - masterServer = transSplitter.getMasterServer(); - } catch ( KettleException e ) { - log.logError( "Unable to obtain the master server from the cluster", e ); - return 1; - } - TransMeta masterTransMeta = transSplitter.getMaster(); - int errors = 0; - - // All transformations have finished, with or without error. - // Now run a cleanup on all the transformation on the master and the slaves. - // - // Slaves first... - // - for ( int s = 0; s < slaveServers.length; s++ ) { - try { - cleanupSlaveServer( transSplitter, slaveServers[s], slaves[s] ); - } catch ( Exception e ) { - errors++; - log.logError( "Unable to contact slave server '" + slaveServers[s].getName() - + "' to clean up slave transformation", e ); - } - } - - // Clean up the master too - // - if ( masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) { - try { - cleanupSlaveServer( transSplitter, masterServer, masterTransMeta ); - } catch ( Exception e ) { - errors++; - log.logError( "Unable to contact master server '" + masterServer.getName() - + "' to clean up master transformation", e ); - } - - // Also de-allocate all ports used for this clustered transformation on the master. - // - try { - // Deallocate all ports belonging to this clustered run, not anything else - // - masterServer.deAllocateServerSockets( transSplitter.getOriginalTransformation().getName(), transSplitter - .getClusteredRunId() ); - } catch ( Exception e ) { - errors++; - log.logError( "Unable to contact master server '" + masterServer.getName() - + "' to clean up port sockets for transformation'" + transSplitter.getOriginalTransformation().getName() - + "'", e ); - } - } - - return errors; - } - - /** - * Cleanup the slave server as part of a clustered transformation. 
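
Taken together, the clustered helpers in this class are typically driven in three steps; a sketch under the assumption that transMeta, executionConfiguration, log and parentJob already exist (KettleException handling omitted).

    // Split the transformation, post it to the cluster, then monitor it.
    TransSplitter transSplitter = Trans.executeClustered(transMeta, executionConfiguration);
    // Poll the master and the slaves once per second until they all finish (or fail).
    long errors = Trans.monitorClusteredTransformation(log, transSplitter, parentJob, 1);
    if (errors > 0) {
        // monitorClusteredTransformation already ran cleanupCluster(); only reporting is left.
        log.logError("Clustered execution finished with " + errors + " error(s).");
    }
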
- * - * @param transSplitter - * the TransSplitter object - * @param slaveServer - * the slave server - * @param slaveTransMeta - * the slave transformation meta-data - * @throws KettleException - * if any errors occur during cleanup - */ - public static void cleanupSlaveServer( TransSplitter transSplitter, SlaveServer slaveServer, - TransMeta slaveTransMeta ) throws KettleException { - String transName = slaveTransMeta.getName(); - try { - String carteObjectId = transSplitter.getCarteObjectMap().get( slaveTransMeta ); - WebResult webResult = slaveServer.cleanupTransformation( transName, carteObjectId ); - if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { - throw new KettleException( "Unable to run clean-up on slave server '" + slaveServer + "' for transformation '" - + transName + "' : " + webResult.getMessage() ); - } - } catch ( Exception e ) { - throw new KettleException( "Unexpected error contacting slave server '" + slaveServer - + "' to clear up transformation '" + transName + "'", e ); - } - } - - /** - * Gets the clustered transformation result. - * - * @param log - * the log channel interface - * @param transSplitter - * the TransSplitter object - * @param parentJob - * the parent job - * @return the clustered transformation result - */ - public static final Result getClusteredTransformationResult( LogChannelInterface log, TransSplitter transSplitter, - Job parentJob ) { - return getClusteredTransformationResult( log, transSplitter, parentJob, false ); - } - - /** - * Gets the clustered transformation result. - * - * @param log - * the log channel interface - * @param transSplitter - * the TransSplitter object - * @param parentJob - * the parent job - * @param loggingRemoteWork - * log remote execution logs locally - * @return the clustered transformation result - */ - public static final Result getClusteredTransformationResult( LogChannelInterface log, TransSplitter transSplitter, - Job parentJob, boolean loggingRemoteWork ) { - Result result = new Result(); - // - // See if the remote transformations have finished. - // We could just look at the master, but I doubt that that is enough in all situations. - // - SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask these guys - TransMeta[] slaves = transSplitter.getSlaves(); - - SlaveServer masterServer; - try { - masterServer = transSplitter.getMasterServer(); - } catch ( KettleException e ) { - log.logError( "Error getting the master server", e ); - masterServer = null; - result.setNrErrors( result.getNrErrors() + 1 ); - } - TransMeta master = transSplitter.getMaster(); - - // Slaves first... - // - for ( int s = 0; s < slaveServers.length; s++ ) { - try { - // Get the detailed status of the slave transformation... - // - SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), "", 0 ); - Result transResult = transStatus.getResult( slaves[s] ); - result.add( transResult ); + return hierarchy; + } + + /** + * Gets the active sub-transformations. + * + * @return a map (by name) of the active sub-transformations + */ + public Map getActiveSubtransformations() { + return activeSubtransformations; + } + + /** + * Gets the active sub-jobs. + * + * @return a map (by name) of the active sub-jobs + */ + public Map getActiveSubjobs() { + return activeSubjobs; + } + + /** + * Gets the container object ID. + * + * @return the Carte object ID + */ + public String getContainerObjectId() { + return containerObjectId; + } + + /** + * Sets the container object ID. 
+ * + * @param containerObjectId the Carte object ID to set + */ + public void setContainerObjectId(String containerObjectId) { + this.containerObjectId = containerObjectId; + } + + /** + * Gets the registration date. For Trans, this always returns null + * + * @return null + */ + public Date getRegistrationDate() { + return null; + } + + /** + * Sets the servlet print writer. + * + * @param servletPrintWriter the new servlet print writer + */ + public void setServletPrintWriter(PrintWriter servletPrintWriter) { + this.servletPrintWriter = servletPrintWriter; + } + + /** + * Gets the servlet print writer. + * + * @return the servlet print writer + */ + public PrintWriter getServletPrintWriter() { + return servletPrintWriter; + } + + /** + * Gets the name of the executing server. + * + * @return the executingServer + */ + public String getExecutingServer() { + return executingServer; + } + + /** + * Sets the name of the executing server. + * + * @param executingServer the executingServer to set + */ + public void setExecutingServer(String executingServer) { + this.executingServer = executingServer; + } + + /** + * Gets the name of the executing user. + * + * @return the executingUser + */ + public String getExecutingUser() { + return executingUser; + } + + /** + * Sets the name of the executing user. + * + * @param executingUser the executingUser to set + */ + public void setExecutingUser(String executingUser) { + this.executingUser = executingUser; + } + + @Override + public boolean isGatheringMetrics() { + return log != null && log.isGatheringMetrics(); + } + + @Override + public void setGatheringMetrics(boolean gatheringMetrics) { + if (log != null) { + log.setGatheringMetrics(gatheringMetrics); + } + } + + @Override + public boolean isForcingSeparateLogging() { + return log != null && log.isForcingSeparateLogging(); + } - if ( loggingRemoteWork ) { - log.logBasic( "-- Slave : " + slaveServers[s].getName() ); - log.logBasic( transStatus.getLoggingString() ); + @Override + public void setForcingSeparateLogging(boolean forcingSeparateLogging) { + if (log != null) { + log.setForcingSeparateLogging(forcingSeparateLogging); } - } catch ( Exception e ) { - result.setNrErrors( result.getNrErrors() + 1 ); - log.logError( "Unable to contact slave server '" + slaveServers[s].getName() - + "' to get result of slave transformation : " + e.toString() ); - } } - // Clean up the master too - // - if ( master != null && master.nrSteps() > 0 ) { - try { - // Get the detailed status of the slave transformation... - // - SlaveServerTransStatus transStatus = masterServer.getTransStatus( master.getName(), "", 0 ); - Result transResult = transStatus.getResult( master ); - - result.add( transResult ); - - if ( loggingRemoteWork ) { - log.logBasic( "-- Master : " + masterServer.getName() ); - log.logBasic( transStatus.getLoggingString() ); - } - } catch ( Exception e ) { - result.setNrErrors( result.getNrErrors() + 1 ); - log.logError( "Unable to contact master server '" + masterServer.getName() - + "' to get result of master transformation : " + e.toString() ); - } - } - - return result; - } - - /** - * Send the transformation for execution to a Carte slave server. - * - * @param transMeta - * the transformation meta-data - * @param executionConfiguration - * the transformation execution configuration - * @param repository - * the repository - * @return The Carte object ID on the server. 
- * @throws KettleException - * if any errors occur during the dispatch to the slave server - */ - public static String sendToSlaveServer( TransMeta transMeta, TransExecutionConfiguration executionConfiguration, - Repository repository, IMetaStore metaStore ) throws KettleException { - String carteObjectId; - SlaveServer slaveServer = executionConfiguration.getRemoteServer(); - - if ( slaveServer == null ) { - throw new KettleException( "No slave server specified" ); - } - if ( Const.isEmpty( transMeta.getName() ) ) { - throw new KettleException( "The transformation needs a name to uniquely identify it by on the remote server." ); - } - - try { - // Inject certain internal variables to make it more intuitive. - // - Map vars = new HashMap(); - - for ( String var : Const.INTERNAL_TRANS_VARIABLES ) { - vars.put( var, transMeta.getVariable( var ) ); - } - for ( String var : Const.INTERNAL_JOB_VARIABLES ) { - vars.put( var, transMeta.getVariable( var ) ); - } - - executionConfiguration.getVariables().putAll( vars ); - slaveServer.injectVariables( executionConfiguration.getVariables() ); - - slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() ); - - if ( executionConfiguration.isPassingExport() ) { - - // First export the job... - // - FileObject tempFile = - KettleVFS.createTempFile( "transExport", ".zip", System.getProperty( "java.io.tmpdir" ), transMeta ); + public List getResultFiles() { + return resultFiles; + } - TopLevelResource topLevelResource = - ResourceUtil.serializeResourceExportInterface( tempFile.getName().toString(), transMeta, transMeta, - repository, metaStore, executionConfiguration.getXML(), CONFIGURATION_IN_EXPORT_FILENAME ); + public void setResultFiles(List resultFiles) { + this.resultFiles = resultFiles; + } - // Send the zip file over to the slave server... - // - String result = - slaveServer.sendExport( topLevelResource.getArchiveName(), AddExportServlet.TYPE_TRANS, topLevelResource - .getBaseResourceName() ); - WebResult webResult = WebResult.fromXMLString( result ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error passing the exported transformation to the remote server: " - + Const.CR + webResult.getMessage() ); - } - carteObjectId = webResult.getId(); - } else { - - // Now send it off to the remote server... 
- // - String xml = new TransConfiguration( transMeta, executionConfiguration ).getXML(); - String reply = slaveServer.sendXML( xml, RegisterTransServlet.CONTEXT_PATH + "/?xml=Y" ); - WebResult webResult = WebResult.fromXMLString( reply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error posting the transformation on the remote server: " + Const.CR - + webResult.getMessage() ); - } - carteObjectId = webResult.getId(); - } - - // Prepare the transformation - // - String reply = - slaveServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( transMeta - .getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId ); - WebResult webResult = WebResult.fromXMLString( reply ); - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error preparing the transformation for excution on the remote server: " - + Const.CR + webResult.getMessage() ); - } - - // Start the transformation - // - reply = - slaveServer.execService( StartExecutionTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode( transMeta - .getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId ); - webResult = WebResult.fromXMLString( reply ); - - if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) { - throw new KettleException( "There was an error starting the transformation on the remote server: " + Const.CR - + webResult.getMessage() ); - } - - return carteObjectId; - } catch ( KettleException ke ) { - throw ke; - } catch ( Exception e ) { - throw new KettleException( e ); - } - } - - /** - * Checks whether the transformation is ready to start (i.e. execution preparation was successful) - * - * @return true if the transformation was prepared for execution successfully, false otherwise - * @see org.pentaho.di.trans.Trans#prepareExecution(String[]) - */ - public boolean isReadyToStart() { - return readyToStart; - } - - /** - * Sets the internal kettle variables. - * - * @param var - * the new internal kettle variables - */ - public void setInternalKettleVariables( VariableSpace var ) { - if ( transMeta != null && !Const.isEmpty( transMeta.getFilename() ) ) { // we have a finename that's defined. 
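
A minimal sketch of the local execution lifecycle that isReadyToStart() reports on; prepareExecution(String[]) is the entry point referenced above, while startThreads() and waitUntilFinished() are the usual companion calls in this code base and are used here under that assumption.

    Trans trans = new Trans(transMeta);
    trans.prepareExecution(null);    // throws KettleException if preparation fails
    if (trans.isReadyToStart()) {
        trans.startThreads();
        trans.waitUntilFinished();
        long errors = trans.getResult().getNrErrors();
    }
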
- try { - FileObject fileObject = KettleVFS.getFileObject( transMeta.getFilename(), var ); - FileName fileName = fileObject.getName(); - - // The filename of the transformation - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() ); - - // The directory of the transformation - FileName fileDir = fileName.getParent(); - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() ); - } catch ( KettleFileException e ) { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); - } - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); - } - - boolean hasRepoDir = transMeta.getRepositoryDirectory() != null && transMeta.getRepository() != null; - - // The name of the transformation - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( transMeta.getName(), "" ) ); - - // setup fallbacks - if ( hasRepoDir ) { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) ); - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); - } - - // TODO PUT THIS INSIDE OF THE "IF" - // The name of the directory in the repository - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, transMeta - .getRepositoryDirectory() != null ? transMeta.getRepositoryDirectory().getPath() : "" ); - - // Here we don't clear the definition of the job specific parameters, as they may come in handy. - // A transformation can be called from a job and may inherit the job internal variables - // but the other around is not possible. - - if ( hasRepoDir ) { - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) ); - if ( "/".equals( variables.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ) ) { - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "" ); - } - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, variables.getVariable( - Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); - } - } - - /** - * Copies variables from a given variable space to this transformation. - * - * @param space - * the variable space - * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) - */ - public void copyVariablesFrom( VariableSpace space ) { - variables.copyVariablesFrom( space ); - } - - /** - * Substitutes any variable values into the given string, and returns the resolved string. - * - * @param aString - * the string to resolve against environment variables - * @return the string after variables have been resolved/susbstituted - * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) - */ - public String environmentSubstitute( String aString ) { - return variables.environmentSubstitute( aString ); - } - - /** - * Substitutes any variable values into each of the given strings, and returns an array containing the resolved - * string(s). 
- * - * @param aString - * an array of strings to resolve against environment variables - * @return the array of strings after variables have been resolved/susbstituted - * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) - */ - public String[] environmentSubstitute( String[] aString ) { - return variables.environmentSubstitute( aString ); - } - - public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) - throws KettleValueException { - return variables.fieldSubstitute( aString, rowMeta, rowData ); - } - - /** - * Gets the parent variable space. - * - * @return the parent variable space - * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() - */ - public VariableSpace getParentVariableSpace() { - return variables.getParentVariableSpace(); - } - - /** - * Sets the parent variable space. - * - * @param parent - * the new parent variable space - * @see org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace( - * org.pentaho.di.core.variables.VariableSpace) - */ - public void setParentVariableSpace( VariableSpace parent ) { - variables.setParentVariableSpace( parent ); - } - - /** - * Gets the value of the specified variable, or returns a default value if no such variable exists. - * - * @param variableName - * the variable name - * @param defaultValue - * the default value - * @return the value of the specified variable, or returns a default value if no such variable exists - * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) - */ - public String getVariable( String variableName, String defaultValue ) { - return variables.getVariable( variableName, defaultValue ); - } - - /** - * Gets the value of the specified variable, or returns a default value if no such variable exists. - * - * @param variableName - * the variable name - * @return the value of the specified variable, or returns a default value if no such variable exists - * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) - */ - public String getVariable( String variableName ) { - return variables.getVariable( variableName ); - } - - /** - * Returns a boolean representation of the specified variable after performing any necessary substitution. Truth - * values include case-insensitive versions of "Y", "YES", "TRUE" or "1". - * - * @param variableName - * the variable name - * @param defaultValue - * the default value - * @return a boolean representation of the specified variable after performing any necessary substitution - * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) - */ - public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) { - if ( !Const.isEmpty( variableName ) ) { - String value = environmentSubstitute( variableName ); - if ( !Const.isEmpty( value ) ) { - return ValueMeta.convertStringToBoolean( value ); - } - } - return defaultValue; - } - - /** - * Sets the values of the transformation's variables to the values from the parent variables. - * - * @param parent - * the parent - * @see org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom( - * org.pentaho.di.core.variables.VariableSpace) - */ - public void initializeVariablesFrom( VariableSpace parent ) { - variables.initializeVariablesFrom( parent ); - } - - /** - * Gets a list of variable names for the transformation. 
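
A brief sketch of the variable-space helpers described above; the variable names are hypothetical.

    trans.setVariable("TARGET_SCHEMA", "staging");
    String sql = trans.environmentSubstitute("SELECT * FROM ${TARGET_SCHEMA}.orders");
    // getBooleanValueOfVariable substitutes first, then accepts "Y", "YES", "TRUE" or "1".
    boolean dropFirst = trans.getBooleanValueOfVariable("${DROP_TARGET_FIRST}", false);
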
- * - * @return a list of variable names - * @see org.pentaho.di.core.variables.VariableSpace#listVariables() - */ - public String[] listVariables() { - return variables.listVariables(); - } - - /** - * Sets the value of the specified variable to the specified value. - * - * @param variableName - * the variable name - * @param variableValue - * the variable value - * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) - */ - public void setVariable( String variableName, String variableValue ) { - variables.setVariable( variableName, variableValue ); - } - - /** - * Shares a variable space from another variable space. This means that the object should take over the space used as - * argument. - * - * @param space - * the variable space - * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) - */ - public void shareVariablesWith( VariableSpace space ) { - variables = space; - } - - /** - * Injects variables using the given Map. The behavior should be that the properties object will be stored and at the - * time the VariableSpace is initialized (or upon calling this method if the space is already initialized). After - * injecting the link of the properties object should be removed. - * - * @param prop - * the property map - * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) - */ - public void injectVariables( Map prop ) { - variables.injectVariables( prop ); - } - - /** - * Pauses the transformation (pause all steps). - */ - public void pauseRunning() { - paused.set( true ); - for ( StepMetaDataCombi combi : steps ) { - combi.step.pauseRunning(); - } - } - - /** - * Resumes running the transformation after a pause (resume all steps). - */ - public void resumeRunning() { - for ( StepMetaDataCombi combi : steps ) { - combi.step.resumeRunning(); - } - paused.set( false ); - } - - /** - * Checks whether the transformation is being previewed. - * - * @return true if the transformation is being previewed, false otherwise - */ - public boolean isPreview() { - return preview; - } - - /** - * Sets whether the transformation is being previewed. - * - * @param preview - * true if the transformation is being previewed, false otherwise - */ - public void setPreview( boolean preview ) { - this.preview = preview; - } - - /** - * Gets the repository object for the transformation. - * - * @return the repository - */ - public Repository getRepository() { - - if ( repository == null ) { - // Does the transmeta have a repo? - // This is a valid case, when a non-repo trans is attempting to retrieve - // a transformation in the repository. - if ( transMeta != null ) { - return transMeta.getRepository(); - } - } - return repository; - } - - /** - * Sets the repository object for the transformation. - * - * @param repository - * the repository object to set - */ - public void setRepository( Repository repository ) { - this.repository = repository; - if ( transMeta != null ) { - transMeta.setRepository( repository ); - } - } - - /** - * Gets a named list (map) of step performance snapshots. - * - * @return a named list (map) of step performance snapshots - */ - public Map> getStepPerformanceSnapShots() { - return stepPerformanceSnapShots; - } - - /** - * Sets the named list (map) of step performance snapshots. 
- * - * @param stepPerformanceSnapShots - * a named list (map) of step performance snapshots to set - */ - public void setStepPerformanceSnapShots( Map> stepPerformanceSnapShots ) { - this.stepPerformanceSnapShots = stepPerformanceSnapShots; - } - - /** - * Gets a list of the transformation listeners. Please do not attempt to modify this list externally. Returned list is - * mutable only for backward compatibility purposes. - * - * @return the transListeners - */ - public List getTransListeners() { - return transListeners; - } - - /** - * Sets the list of transformation listeners. - * - * @param transListeners - * the transListeners to set - */ - public void setTransListeners( List transListeners ) { - this.transListeners = Collections.synchronizedList( transListeners ); - } - - /** - * Adds a transformation listener. - * - * @param transListener - * the trans listener - */ - public void addTransListener( TransListener transListener ) { - // PDI-5229 sync added - synchronized ( transListeners ) { - transListeners.add( transListener ); - } - } - - /** - * Sets the list of stop-event listeners for the transformation. - * - * @param transStoppedListeners - * the list of stop-event listeners to set - */ - public void setTransStoppedListeners( List transStoppedListeners ) { - this.transStoppedListeners = Collections.synchronizedList( transStoppedListeners ); - } - - /** - * Gets the list of stop-event listeners for the transformation. This is not concurrent safe. Please note this is - * mutable implementation only for backward compatibility reasons. - * - * @return the list of stop-event listeners - */ - public List getTransStoppedListeners() { - return transStoppedListeners; - } - - /** - * Adds a stop-event listener to the transformation. - * - * @param transStoppedListener - * the stop-event listener to add - */ - public void addTransStoppedListener( TransStoppedListener transStoppedListener ) { - transStoppedListeners.add( transStoppedListener ); - } - - /** - * Checks if the transformation is paused. - * - * @return true if the transformation is paused, false otherwise - */ - public boolean isPaused() { - return paused.get(); - } - - /** - * Checks if the transformation is stopped. - * - * @return true if the transformation is stopped, false otherwise - */ - public boolean isStopped() { - return stopped.get(); - } - - /** - * Monitors a remote transformation every 5 seconds. - * - * @param log - * the log channel interface - * @param carteObjectId - * the Carte object ID - * @param transName - * the transformation name - * @param remoteSlaveServer - * the remote slave server - */ - public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName, - SlaveServer remoteSlaveServer ) { - monitorRemoteTransformation( log, carteObjectId, transName, remoteSlaveServer, 5 ); - } - - /** - * Monitors a remote transformation at the specified interval. 
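
An illustrative sketch of reacting to transformation completion through the listener API above; TransAdapter is assumed here as the convenience base class for TransListener.

    trans.addTransListener(new TransAdapter() {
        @Override
        public void transFinished(Trans trans) throws KettleException {
            trans.getLogChannel().logBasic(
                "Finished with " + trans.getResult().getNrErrors() + " error(s).");
        }
    });
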
- * - * @param log - * the log channel interface - * @param carteObjectId - * the Carte object ID - * @param transName - * the transformation name - * @param remoteSlaveServer - * the remote slave server - * @param sleepTimeSeconds - * the sleep time (in seconds) - */ - public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName, - SlaveServer remoteSlaveServer, int sleepTimeSeconds ) { - long errors = 0; - boolean allFinished = false; - while ( !allFinished && errors == 0 ) { - allFinished = true; - errors = 0L; - - // Check the remote server - if ( allFinished && errors == 0 ) { - try { - SlaveServerTransStatus transStatus = remoteSlaveServer.getTransStatus( transName, carteObjectId, 0 ); - if ( transStatus.isRunning() ) { - if ( log.isDetailed() ) { - log.logDetailed( transName, "Remote transformation is still running." ); - } - allFinished = false; - } else { - if ( log.isDetailed() ) { - log.logDetailed( transName, "Remote transformation has finished." ); - } - } - Result result = transStatus.getResult(); - errors += result.getNrErrors(); - } catch ( Exception e ) { - errors += 1; - log.logError( transName, "Unable to contact remote slave server '" + remoteSlaveServer.getName() - + "' to check transformation status : " + e.toString() ); - } - } - - // - // Keep waiting until all transformations have finished - // If needed, we stop them again and again until they yield. - // - if ( !allFinished ) { - // Not finished or error: wait a bit longer - if ( log.isDetailed() ) { - log.logDetailed( transName, "The remote transformation is still running, waiting a few seconds..." ); + public List getResultRows() { + return resultRows; + } + + public void setResultRows(List resultRows) { + this.resultRows = resultRows; + } + + public Result getPreviousResult() { + return previousResult; + } + + public void setPreviousResult(Result previousResult) { + this.previousResult = previousResult; + } + + public Hashtable getCounters() { + return counters; + } + + public void setCounters(Hashtable counters) { + this.counters = counters; + } + + public String[] getArguments() { + return arguments; + } + + public void setArguments(String[] arguments) { + this.arguments = arguments; + } + + /** + * Clear the error in the transformation, clear all the rows from all the row sets, to make sure the transformation + * can continue with other data. This is intended for use when running single threaded. + */ + public void clearError() { + stopped.set(false); + errors.set(0); + setFinished(false); + for (StepMetaDataCombi combi : steps) { + StepInterface step = combi.step; + for (RowSet rowSet : step.getInputRowSets()) { + rowSet.clear(); + } + step.setStopped(false); } - try { - Thread.sleep( sleepTimeSeconds * 1000 ); - } catch ( Exception e ) { - // Ignore errors - } // Check all slaves every x seconds. - } - } - - log.logMinimal( transName, "The remote transformation has finished." 
); - - // Clean up the remote transformation - // - try { - WebResult webResult = remoteSlaveServer.cleanupTransformation( transName, carteObjectId ); - if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) { - log.logError( transName, "Unable to run clean-up on remote transformation '" + transName + "' : " + webResult - .getMessage() ); - errors += 1; - } - } catch ( Exception e ) { - errors += 1; - log.logError( transName, "Unable to contact slave server '" + remoteSlaveServer.getName() - + "' to clean up transformation : " + e.toString() ); - } - } - - /** - * Adds a parameter definition to this transformation. - * - * @param key - * the name of the parameter - * @param defValue - * the default value for the parameter - * @param description - * the description of the parameter - * @throws DuplicateParamException - * the duplicate param exception - * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, - * java.lang.String) - */ - public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException { - namedParams.addParameterDefinition( key, defValue, description ); - } - - /** - * Gets the default value of the specified parameter. - * - * @param key - * the name of the parameter - * @return the default value of the parameter - * @throws UnknownParamException - * if the parameter does not exist - * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) - */ - public String getParameterDefault( String key ) throws UnknownParamException { - return namedParams.getParameterDefault( key ); - } - - /** - * Gets the description of the specified parameter. - * - * @param key - * the name of the parameter - * @return the parameter description - * @throws UnknownParamException - * if the parameter does not exist - * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) - */ - public String getParameterDescription( String key ) throws UnknownParamException { - return namedParams.getParameterDescription( key ); - } - - /** - * Gets the value of the specified parameter. - * - * @param key - * the name of the parameter - * @return the parameter value - * @throws UnknownParamException - * if the parameter does not exist - * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) - */ - public String getParameterValue( String key ) throws UnknownParamException { - return namedParams.getParameterValue( key ); - } - - /** - * Gets a list of the parameters for the transformation. - * - * @return an array of strings containing the names of all parameters for the transformation - * @see org.pentaho.di.core.parameters.NamedParams#listParameters() - */ - public String[] listParameters() { - return namedParams.listParameters(); - } - - /** - * Sets the value for the specified parameter. - * - * @param key - * the name of the parameter - * @param value - * the name of the value - * @throws UnknownParamException - * if the parameter does not exist - * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) - */ - public void setParameterValue( String key, String value ) throws UnknownParamException { - namedParams.setParameterValue( key, value ); - } - - /** - * Remove all parameters. 
- * - * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() - */ - public void eraseParameters() { - namedParams.eraseParameters(); - } - - /** - * Clear the values of all parameters. - * - * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() - */ - public void clearParameters() { - namedParams.clearParameters(); - } - - /** - * Activates all parameters by setting their values. If no values already exist, the method will attempt to set the - * parameter to the default value. If no default value exists, the method will set the value of the parameter to the - * empty string (""). - * - * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() - */ - public void activateParameters() { - String[] keys = listParameters(); - - for ( String key : keys ) { - String value; - try { - value = getParameterValue( key ); - } catch ( UnknownParamException e ) { - value = ""; - } - - String defValue; - try { - defValue = getParameterDefault( key ); - } catch ( UnknownParamException e ) { - defValue = ""; - } - - if ( Const.isEmpty( value ) ) { - setVariable( key, Const.NVL( defValue, "" ) ); - } else { - setVariable( key, Const.NVL( value, "" ) ); - } - } - } - - /** - * Copy parameters from a NamedParams object. - * - * @param params - * the NamedParams object from which to copy the parameters - * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) - */ - public void copyParametersFrom( NamedParams params ) { - namedParams.copyParametersFrom( params ); - } - - /** - * Gets the parent transformation, which is null if no parent transformation exists. - * - * @return a reference to the parent transformation's Trans object, or null if no parent transformation exists - */ - public Trans getParentTrans() { - return parentTrans; - } - - /** - * Sets the parent transformation. - * - * @param parentTrans - * the parentTrans to set - */ - public void setParentTrans( Trans parentTrans ) { - this.logLevel = parentTrans.getLogLevel(); - this.log.setLogLevel( logLevel ); - this.parentTrans = parentTrans; - - transactionId = calculateTransactionId(); - } - - /** - * Gets the mapping step name. - * - * @return the name of the mapping step that created this transformation - */ - public String getMappingStepName() { - return mappingStepName; - } - - /** - * Sets the mapping step name. - * - * @param mappingStepName - * the name of the mapping step that created this transformation - */ - public void setMappingStepName( String mappingStepName ) { - this.mappingStepName = mappingStepName; - } - - /** - * Sets the socket repository. - * - * @param socketRepository - * the new socket repository - */ - public void setSocketRepository( SocketRepository socketRepository ) { - this.socketRepository = socketRepository; - } - - /** - * Gets the socket repository. - * - * @return the socket repository - */ - public SocketRepository getSocketRepository() { - return socketRepository; - } - - /** - * Gets the object name. - * - * @return the object name - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectName() - */ - public String getObjectName() { - return getName(); - } - - /** - * Gets the object copy. 
For Trans, this always returns null - * - * @return null - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectCopy() - */ - public String getObjectCopy() { - return null; - } - - /** - * Gets the filename of the transformation, or null if no filename exists - * - * @return the filename - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getFilename() - */ - public String getFilename() { - if ( transMeta == null ) { - return null; - } - return transMeta.getFilename(); - } - - /** - * Gets the log channel ID. - * - * @return the log channel ID - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() - */ - public String getLogChannelId() { - return log.getLogChannelId(); - } - - /** - * Gets the object ID. - * - * @return the object ID - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectId() - */ - public ObjectId getObjectId() { - if ( transMeta == null ) { - return null; - } - return transMeta.getObjectId(); - } - - /** - * Gets the object revision. - * - * @return the object revision - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectRevision() - */ - public ObjectRevision getObjectRevision() { - if ( transMeta == null ) { - return null; - } - return transMeta.getObjectRevision(); - } - - /** - * Gets the object type. For Trans, this always returns LoggingObjectType.TRANS - * - * @return the object type - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() - */ - public LoggingObjectType getObjectType() { - return LoggingObjectType.TRANS; - } - - /** - * Gets the parent logging object interface. - * - * @return the parent - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getParent() - */ - public LoggingObjectInterface getParent() { - return parent; - } - - /** - * Gets the repository directory. - * - * @return the repository directory - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getRepositoryDirectory() - */ - public RepositoryDirectoryInterface getRepositoryDirectory() { - if ( transMeta == null ) { - return null; - } - return transMeta.getRepositoryDirectory(); - } - - /** - * Gets the log level. - * - * @return the log level - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogLevel() - */ - public LogLevel getLogLevel() { - return logLevel; - } - - /** - * Sets the log level. - * - * @param logLevel - * the new log level - */ - public void setLogLevel( LogLevel logLevel ) { - this.logLevel = logLevel; - log.setLogLevel( logLevel ); - } - - /** - * Gets the logging hierarchy. - * - * @return the logging hierarchy - */ - public List getLoggingHierarchy() { - List hierarchy = new ArrayList(); - List childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() ); - for ( String childId : childIds ) { - LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId ); - if ( loggingObject != null ) { - hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) ); - } - } - - return hierarchy; - } - - /** - * Gets the active sub-transformations. - * - * @return a map (by name) of the active sub-transformations - */ - public Map getActiveSubtransformations() { - return activeSubtransformations; - } - - /** - * Gets the active sub-jobs. - * - * @return a map (by name) of the active sub-jobs - */ - public Map getActiveSubjobs() { - return activeSubjobs; - } - - /** - * Gets the container object ID. 
- * - * @return the Carte object ID - */ - public String getContainerObjectId() { - return containerObjectId; - } - - /** - * Sets the container object ID. - * - * @param containerObjectId - * the Carte object ID to set - */ - public void setContainerObjectId( String containerObjectId ) { - this.containerObjectId = containerObjectId; - } - - /** - * Gets the registration date. For Trans, this always returns null - * - * @return null - */ - public Date getRegistrationDate() { - return null; - } - - /** - * Sets the servlet print writer. - * - * @param servletPrintWriter - * the new servlet print writer - */ - public void setServletPrintWriter( PrintWriter servletPrintWriter ) { - this.servletPrintWriter = servletPrintWriter; - } - - /** - * Gets the servlet print writer. - * - * @return the servlet print writer - */ - public PrintWriter getServletPrintWriter() { - return servletPrintWriter; - } - - /** - * Gets the name of the executing server. - * - * @return the executingServer - */ - public String getExecutingServer() { - return executingServer; - } - - /** - * Sets the name of the executing server. - * - * @param executingServer - * the executingServer to set - */ - public void setExecutingServer( String executingServer ) { - this.executingServer = executingServer; - } - - /** - * Gets the name of the executing user. - * - * @return the executingUser - */ - public String getExecutingUser() { - return executingUser; - } - - /** - * Sets the name of the executing user. - * - * @param executingUser - * the executingUser to set - */ - public void setExecutingUser( String executingUser ) { - this.executingUser = executingUser; - } - - @Override - public boolean isGatheringMetrics() { - return log != null && log.isGatheringMetrics(); - } - - @Override - public void setGatheringMetrics( boolean gatheringMetrics ) { - if ( log != null ) { - log.setGatheringMetrics( gatheringMetrics ); - } - } - - @Override - public boolean isForcingSeparateLogging() { - return log != null && log.isForcingSeparateLogging(); - } - - @Override - public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { - if ( log != null ) { - log.setForcingSeparateLogging( forcingSeparateLogging ); - } - } - - public List getResultFiles() { - return resultFiles; - } - - public void setResultFiles( List resultFiles ) { - this.resultFiles = resultFiles; - } - - public List getResultRows() { - return resultRows; - } - - public void setResultRows( List resultRows ) { - this.resultRows = resultRows; - } - - public Result getPreviousResult() { - return previousResult; - } - - public void setPreviousResult( Result previousResult ) { - this.previousResult = previousResult; - } - - public Hashtable getCounters() { - return counters; - } - - public void setCounters( Hashtable counters ) { - this.counters = counters; - } - - public String[] getArguments() { - return arguments; - } - - public void setArguments( String[] arguments ) { - this.arguments = arguments; - } - - /** - * Clear the error in the transformation, clear all the rows from all the row sets, to make sure the transformation - * can continue with other data. This is intended for use when running single threaded. - */ - public void clearError() { - stopped.set( false ); - errors.set( 0 ); - setFinished( false ); - for ( StepMetaDataCombi combi : steps ) { - StepInterface step = combi.step; - for ( RowSet rowSet : step.getInputRowSets() ) { - rowSet.clear(); - } - step.setStopped( false ); - } - } - - /** - * Gets the transaction ID for the transformation. 
- * - * @return the transactionId - */ - public String getTransactionId() { - return transactionId; - } - - /** - * Sets the transaction ID for the transformation. - * - * @param transactionId - * the transactionId to set - */ - public void setTransactionId( String transactionId ) { - this.transactionId = transactionId; - } - - /** - * Calculates the transaction ID for the transformation. - * - * @return the calculated transaction ID for the transformation. - */ - public String calculateTransactionId() { - if ( getTransMeta() != null && getTransMeta().isUsingUniqueConnections() ) { - if ( parentJob != null && parentJob.getTransactionId() != null ) { - return parentJob.getTransactionId(); - } else if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() ) { - return parentTrans.getTransactionId(); - } else { - return DatabaseConnectionMap.getInstance().getNextTransactionId(); - } - } else { - return Thread.currentThread().getName(); - } - } - - public IMetaStore getMetaStore() { - return metaStore; - } - - public void setMetaStore( IMetaStore metaStore ) { - this.metaStore = metaStore; - if ( transMeta != null ) { - transMeta.setMetaStore( metaStore ); - } - } - - /** - * Sets encoding of HttpServletResponse according to System encoding.Check if system encoding is null or an empty and - * set it to HttpServletResponse when not and writes error to log if null. Throw IllegalArgumentException if input - * parameter is null. - * - * @param response - * the HttpServletResponse to set encoding, mayn't be null - */ - public void setServletReponse( HttpServletResponse response ) { - if ( response == null ) { - throw new IllegalArgumentException( "Response is not valid: " + response ); - } - String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null ); - // true if encoding is null or an empty (also for the next kin of strings: " ") - if ( !StringUtils.isBlank( encoding ) ) { - try { - response.setCharacterEncoding( encoding.trim() ); - response.setContentType( "text/html; charset=" + encoding ); - } catch ( Exception ex ) { - LogChannel.GENERAL.logError( "Unable to encode data with encoding : '" + encoding + "'", ex ); - } - } - this.servletResponse = response; - } - - public HttpServletResponse getServletResponse() { - return servletResponse; - } - - public void setServletRequest( HttpServletRequest request ) { - this.servletRequest = request; - } - - public HttpServletRequest getServletRequest() { - return servletRequest; - } - - public List getDelegationListeners() { - return delegationListeners; - } - - public void setDelegationListeners( List delegationListeners ) { - this.delegationListeners = delegationListeners; - } - - public void addDelegationListener( DelegationListener delegationListener ) { - delegationListeners.add( delegationListener ); - } - - public synchronized void doTopologySortOfSteps() { - // The bubble sort algorithm in contrast to the QuickSort or MergeSort - // algorithms - // does indeed cover all possibilities. - // Sorting larger transformations with hundreds of steps might be too slow - // though. - // We should consider caching TransMeta.findPrevious() results in that case. 
- // - transMeta.clearCaches(); - - // - // Cocktail sort (bi-directional bubble sort) - // - // Original sort was taking 3ms for 30 steps - // cocktail sort takes about 8ms for the same 30, but it works :) - // - int stepsMinSize = 0; - int stepsSize = steps.size(); - - // Noticed a problem with an immediate shrinking iteration window - // trapping rows that need to be sorted. - // This threshold buys us some time to get the sorting close before - // starting to decrease the window size. - // - // TODO: this could become much smarter by tracking row movement - // and reacting to that each outer iteration verses - // using a threshold. - // - // After this many iterations enable trimming inner iteration - // window on no change being detected. - // - int windowShrinkThreshold = (int) Math.round( stepsSize * 0.75 ); - - // give ourselves some room to sort big lists. the window threshold should - // stop us before reaching this anyway. - // - int totalIterations = stepsSize * 2; - - boolean isBefore = false; - boolean forwardChange = false; - boolean backwardChange = false; - - boolean lastForwardChange = true; - boolean keepSortingForward = true; - - StepMetaDataCombi one = null; - StepMetaDataCombi two = null; - - for ( int x = 0; x < totalIterations; x++ ) { - - // Go forward through the list - // - if ( keepSortingForward ) { - for ( int y = stepsMinSize; y < stepsSize - 1; y++ ) { - one = steps.get( y ); - two = steps.get( y + 1 ); - - if ( one.stepMeta.equals( two.stepMeta ) ) { - isBefore = one.copy > two.copy; - } else { - isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta ); - } - if ( isBefore ) { - // two was found to be positioned BEFORE one so we need to - // switch them... - // - steps.set( y, two ); - steps.set( y + 1, one ); - forwardChange = true; + } - } + /** + * Gets the transaction ID for the transformation. + * + * @return the transactionId + */ + public String getTransactionId() { + return transactionId; + } + + /** + * Sets the transaction ID for the transformation. + * + * @param transactionId the transactionId to set + */ + public void setTransactionId(String transactionId) { + this.transactionId = transactionId; + } + + /** + * Calculates the transaction ID for the transformation. + * + * @return the calculated transaction ID for the transformation. 
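+ * <p>
+ * The lookup order implemented below: when unique connections are enabled, reuse the parent job's transaction
+ * ID if it has one, otherwise reuse the ID of a parent transformation that also uses unique connections,
+ * otherwise request a new ID from DatabaseConnectionMap; when unique connections are disabled, the current
+ * thread name is returned instead.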
+ */ + public String calculateTransactionId() { + if (getTransMeta() != null && getTransMeta().isUsingUniqueConnections()) { + if (parentJob != null && parentJob.getTransactionId() != null) { + return parentJob.getTransactionId(); + } else if (parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections()) { + return parentTrans.getTransactionId(); + } else { + return DatabaseConnectionMap.getInstance().getNextTransactionId(); + } + } else { + return Thread.currentThread().getName(); } - } + } - // Go backward through the list - // - for ( int z = stepsSize - 1; z > stepsMinSize; z-- ) { - one = steps.get( z ); - two = steps.get( z - 1 ); + public IMetaStore getMetaStore() { + return metaStore; + } - if ( one.stepMeta.equals( two.stepMeta ) ) { - isBefore = one.copy > two.copy; - } else { - isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta ); + public void setMetaStore(IMetaStore metaStore) { + this.metaStore = metaStore; + if (transMeta != null) { + transMeta.setMetaStore(metaStore); + } + } + + /** + * Sets encoding of HttpServletResponse according to System encoding.Check if system encoding is null or an empty and + * set it to HttpServletResponse when not and writes error to log if null. Throw IllegalArgumentException if input + * parameter is null. + * + * @param response the HttpServletResponse to set encoding, mayn't be null + */ + public void setServletReponse(HttpServletResponse response) { + if (response == null) { + throw new IllegalArgumentException("Response is not valid: " + response); } - if ( !isBefore ) { - // two was found NOT to be positioned BEFORE one so we need to - // switch them... - // - steps.set( z, two ); - steps.set( z - 1, one ); - backwardChange = true; + String encoding = System.getProperty("KETTLE_DEFAULT_SERVLET_ENCODING", null); + // true if encoding is null or an empty (also for the next kin of strings: " ") + if (!StringUtils.isBlank(encoding)) { + try { + response.setCharacterEncoding(encoding.trim()); + response.setContentType("text/html; charset=" + encoding); + } catch (Exception ex) { + LogChannel.GENERAL.logError("Unable to encode data with encoding : '" + encoding + "'", ex); + } } - } + this.servletResponse = response; + } + + public HttpServletResponse getServletResponse() { + return servletResponse; + } + + public void setServletRequest(HttpServletRequest request) { + this.servletRequest = request; + } - // Shrink stepsSize(max) if there was no forward change - // - if ( x > windowShrinkThreshold && !forwardChange ) { + public HttpServletRequest getServletRequest() { + return servletRequest; + } + + public List getDelegationListeners() { + return delegationListeners; + } + + public void setDelegationListeners(List delegationListeners) { + this.delegationListeners = delegationListeners; + } + + public void addDelegationListener(DelegationListener delegationListener) { + delegationListeners.add(delegationListener); + } - // should we keep going? check the window size + public synchronized void doTopologySortOfSteps() { + // The bubble sort algorithm in contrast to the QuickSort or MergeSort + // algorithms + // does indeed cover all possibilities. + // Sorting larger transformations with hundreds of steps might be too slow + // though. + // We should consider caching TransMeta.findPrevious() results in that case. 
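+ // Each pairwise comparison below calls transMeta.findPrevious(), so a full sort can cost on the order of
+ // stepsSize * totalIterations lookups. One possible (hypothetical, not implemented here) optimization is to
+ // memoize those lookups in a map keyed by the two step names, e.g.:
+ //   Map<String, Boolean> previousCache = new HashMap<String, Boolean>();
+ //   String key = one.stepMeta.getName() + "->" + two.stepMeta.getName();
+ //   Boolean cached = previousCache.get(key);
+ //   if (cached == null) { cached = transMeta.findPrevious(one.stepMeta, two.stepMeta); previousCache.put(key, cached); }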
// - stepsSize--; - if ( stepsSize <= stepsMinSize ) { - break; - } - } + transMeta.clearCaches(); - // shrink stepsMinSize(min) if there was no backward change - // - if ( x > windowShrinkThreshold && !backwardChange ) { + // + // Cocktail sort (bi-directional bubble sort) + // + // Original sort was taking 3ms for 30 steps + // cocktail sort takes about 8ms for the same 30, but it works :) + // + int stepsMinSize = 0; + int stepsSize = steps.size(); - // should we keep going? check the window size + // Noticed a problem with an immediate shrinking iteration window + // trapping rows that need to be sorted. + // This threshold buys us some time to get the sorting close before + // starting to decrease the window size. // - stepsMinSize++; - if ( stepsMinSize >= stepsSize ) { - break; - } - } - - // End of both forward and backward traversal. - // Time to see if we should keep going. - // - if ( !forwardChange && !backwardChange ) { - break; - } - - // - // if we are past the first iteration and there has been no change twice, - // quit doing it! - // - if ( keepSortingForward && x > 0 && !lastForwardChange && !forwardChange ) { - keepSortingForward = false; - } - lastForwardChange = forwardChange; - forwardChange = false; - backwardChange = false; - - } // finished sorting - } - - @Override - public Map getExtensionDataMap() { - return extensionDataMap; - } - - protected ExecutorService startHeartbeat( final long intervalInSeconds ) { - - ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor( new ThreadFactory() { - - @Override - public Thread newThread( Runnable r ) { - Thread thread = new Thread( r, "Transformation Heartbeat Thread for: " + getName() ); - thread.setDaemon( true ); - return thread; - } - } ); - - heartbeat.scheduleAtFixedRate( new Runnable() { - public void run() { - try { + // TODO: this could become much smarter by tracking row movement + // and reacting to that each outer iteration verses + // using a threshold. + // + // After this many iterations enable trimming inner iteration + // window on no change being detected. + // + int windowShrinkThreshold = (int) Math.round(stepsSize * 0.75); - if ( Trans.this.isFinished() ) { - log.logBasic( "Shutting down heartbeat signal for " + getName() ); - shutdownHeartbeat( Trans.this.heartbeat ); - return; - } + // give ourselves some room to sort big lists. the window threshold should + // stop us before reaching this anyway. 
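+ // For example, with the 30 steps mentioned above: windowShrinkThreshold = round(30 * 0.75) = 23 (computed
+ // above) and totalIterations = 30 * 2 = 60 (computed below), so the shrinking checks further down only fire
+ // once x exceeds 23, i.e. in the last 36 of the 60 outer iterations, and then only on passes with no swaps.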
+ // + int totalIterations = stepsSize * 2; - log.logDebug( "Triggering heartbeat signal for " + getName() + " at every " + intervalInSeconds - + " seconds" ); - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationHeartbeat.id, Trans.this ); + boolean isBefore = false; + boolean forwardChange = false; + boolean backwardChange = false; - } catch ( KettleException e ) { - log.logError( e.getMessage(), e ); - } - } - }, intervalInSeconds /* initial delay */, intervalInSeconds /* interval delay */, TimeUnit.SECONDS ); + boolean lastForwardChange = true; + boolean keepSortingForward = true; + + StepMetaDataCombi one = null; + StepMetaDataCombi two = null; + + for (int x = 0; x < totalIterations; x++) { + + // Go forward through the list + // + if (keepSortingForward) { + for (int y = stepsMinSize; y < stepsSize - 1; y++) { + one = steps.get(y); + two = steps.get(y + 1); + + if (one.stepMeta.equals(two.stepMeta)) { + isBefore = one.copy > two.copy; + } else { + isBefore = transMeta.findPrevious(one.stepMeta, two.stepMeta); + } + if (isBefore) { + // two was found to be positioned BEFORE one so we need to + // switch them... + // + steps.set(y, two); + steps.set(y + 1, one); + forwardChange = true; + + } + } + } + + // Go backward through the list + // + for (int z = stepsSize - 1; z > stepsMinSize; z--) { + one = steps.get(z); + two = steps.get(z - 1); + + if (one.stepMeta.equals(two.stepMeta)) { + isBefore = one.copy > two.copy; + } else { + isBefore = transMeta.findPrevious(one.stepMeta, two.stepMeta); + } + if (!isBefore) { + // two was found NOT to be positioned BEFORE one so we need to + // switch them... + // + steps.set(z, two); + steps.set(z - 1, one); + backwardChange = true; + } + } + + // Shrink stepsSize(max) if there was no forward change + // + if (x > windowShrinkThreshold && !forwardChange) { + + // should we keep going? check the window size + // + stepsSize--; + if (stepsSize <= stepsMinSize) { + break; + } + } + + // shrink stepsMinSize(min) if there was no backward change + // + if (x > windowShrinkThreshold && !backwardChange) { + + // should we keep going? check the window size + // + stepsMinSize++; + if (stepsMinSize >= stepsSize) { + break; + } + } + + // End of both forward and backward traversal. + // Time to see if we should keep going. + // + if (!forwardChange && !backwardChange) { + break; + } + + // + // if we are past the first iteration and there has been no change twice, + // quit doing it! 
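+ // In other words, the outer loop ends early once a full forward and backward pass produces no swaps, and the
+ // forward pass is switched off by the check below after two consecutive swap-free forward passes.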
+ // + if (keepSortingForward && x > 0 && !lastForwardChange && !forwardChange) { + keepSortingForward = false; + } + lastForwardChange = forwardChange; + forwardChange = false; + backwardChange = false; + + } // finished sorting + } + + @Override + public Map getExtensionDataMap() { + return extensionDataMap; + } + + protected ExecutorService startHeartbeat(final long intervalInSeconds) { + + ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { + + @Override + public Thread newThread(Runnable r) { + Thread thread = new Thread(r, "Transformation Heartbeat Thread for: " + getName()); + thread.setDaemon(true); + return thread; + } + }); + + heartbeat.scheduleAtFixedRate(new Runnable() { + public void run() { + try { + + if (Trans.this.isFinished()) { + log.logBasic("Shutting down heartbeat signal for " + getName()); + shutdownHeartbeat(Trans.this.heartbeat); + return; + } + + log.logDebug("Triggering heartbeat signal for " + getName() + " at every " + intervalInSeconds + + " seconds"); + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationHeartbeat.id, Trans.this); - return heartbeat; - } + } catch (KettleException e) { + log.logError(e.getMessage(), e); + } + } + }, intervalInSeconds /* initial delay */, intervalInSeconds /* interval delay */, TimeUnit.SECONDS); + + return heartbeat; + } - protected void shutdownHeartbeat( ExecutorService heartbeat ) { + protected void shutdownHeartbeat(ExecutorService heartbeat) { - if ( heartbeat != null ) { + if (heartbeat != null) { - try { - heartbeat.shutdownNow(); // prevents waiting tasks from starting and attempts to stop currently executing ones + try { + heartbeat.shutdownNow(); // prevents waiting tasks from starting and attempts to stop currently executing ones - } catch ( Throwable t ) { + } catch (Throwable t) { /* do nothing */ - } + } + } } - } - private int getHeartbeatIntervalInSeconds() { + private int getHeartbeatIntervalInSeconds() { - TransMeta meta = this.getTransMeta(); + TransMeta meta = this.getTransMeta(); - // 1 - check if there's a user defined value ( transformation-specific ) heartbeat periodic interval; - // 2 - check if there's a default defined value ( transformation-specific ) heartbeat periodic interval; - // 3 - use default Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS if none of the above have been set + // 1 - check if there's a user defined value ( transformation-specific ) heartbeat periodic interval; + // 2 - check if there's a default defined value ( transformation-specific ) heartbeat periodic interval; + // 3 - use default Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS if none of the above have been set - try { + try { - if ( meta != null ) { + if (meta != null) { - return Const.toInt( meta.getParameterValue( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), Const.toInt( meta - .getParameterDefault( Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS ), - Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS ) ); - } + return Const.toInt(meta.getParameterValue(Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS), Const.toInt(meta + .getParameterDefault(Const.VARIABLE_HEARTBEAT_PERIODIC_INTERVAL_SECS), + Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS)); + } - } catch ( Exception e ) { + } catch (Exception e) { /* do nothing, return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS */ - } + } - return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS; - } + return Const.HEARTBEAT_PERIODIC_INTERVAL_IN_SECS; + } } diff --git 
a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java index b2eb0eb..9c01e16 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java @@ -23,63 +23,22 @@ package org.pentaho.di.trans; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Hashtable; -import java.util.List; -import java.util.Map; -import java.util.Set; - import org.apache.commons.vfs2.FileName; import org.apache.commons.vfs2.FileObject; import org.apache.commons.vfs2.FileSystemException; import org.pentaho.di.base.AbstractMeta; import org.pentaho.di.cluster.ClusterSchema; import org.pentaho.di.cluster.SlaveServer; -import org.pentaho.di.core.CheckResult; -import org.pentaho.di.core.CheckResultInterface; -import org.pentaho.di.core.Const; -import org.pentaho.di.core.Counter; -import org.pentaho.di.core.DBCache; -import org.pentaho.di.core.LastUsedFile; -import org.pentaho.di.core.NotePadMeta; -import org.pentaho.di.core.ProgressMonitorListener; -import org.pentaho.di.core.Props; -import org.pentaho.di.core.Result; -import org.pentaho.di.core.ResultFile; -import org.pentaho.di.core.RowMetaAndData; -import org.pentaho.di.core.SQLStatement; +import org.pentaho.di.core.*; import org.pentaho.di.core.attributes.AttributesUtil; import org.pentaho.di.core.database.Database; import org.pentaho.di.core.database.DatabaseMeta; -import org.pentaho.di.core.exception.KettleDatabaseException; -import org.pentaho.di.core.exception.KettleException; -import org.pentaho.di.core.exception.KettleFileException; -import org.pentaho.di.core.exception.KettleMissingPluginsException; -import org.pentaho.di.core.exception.KettleRowException; -import org.pentaho.di.core.exception.KettleStepException; -import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.exception.*; import org.pentaho.di.core.extension.ExtensionPointHandler; import org.pentaho.di.core.extension.KettleExtensionPoint; import org.pentaho.di.core.gui.OverwritePrompter; import org.pentaho.di.core.gui.Point; -import org.pentaho.di.core.logging.ChannelLogTable; -import org.pentaho.di.core.logging.LogChannel; -import org.pentaho.di.core.logging.LogChannelInterface; -import org.pentaho.di.core.logging.LogStatus; -import org.pentaho.di.core.logging.LogTableInterface; -import org.pentaho.di.core.logging.LoggingObjectInterface; -import org.pentaho.di.core.logging.LoggingObjectType; -import org.pentaho.di.core.logging.MetricsLogTable; -import org.pentaho.di.core.logging.PerformanceLogTable; -import org.pentaho.di.core.logging.StepLogTable; -import org.pentaho.di.core.logging.TransLogTable; +import org.pentaho.di.core.logging.*; import org.pentaho.di.core.parameters.NamedParamsDefault; import org.pentaho.di.core.reflection.StringSearchResult; import org.pentaho.di.core.reflection.StringSearcher; @@ -94,24 +53,14 @@ import org.pentaho.di.core.xml.XMLInterface; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.partition.PartitionSchema; -import org.pentaho.di.repository.HasRepositoryInterface; -import org.pentaho.di.repository.Repository; -import org.pentaho.di.repository.RepositoryDirectory; -import org.pentaho.di.repository.RepositoryElementInterface; -import 
org.pentaho.di.repository.RepositoryObjectType; +import org.pentaho.di.repository.*; import org.pentaho.di.resource.ResourceDefinition; import org.pentaho.di.resource.ResourceExportInterface; import org.pentaho.di.resource.ResourceNamingInterface; import org.pentaho.di.resource.ResourceReference; import org.pentaho.di.shared.SharedObjectInterface; import org.pentaho.di.shared.SharedObjects; -import org.pentaho.di.trans.step.BaseStep; -import org.pentaho.di.trans.step.RemoteStep; -import org.pentaho.di.trans.step.StepErrorMeta; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaChangeListenerInterface; -import org.pentaho.di.trans.step.StepMetaInterface; -import org.pentaho.di.trans.step.StepPartitioningMeta; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.steps.jobexecutor.JobExecutorMeta; import org.pentaho.di.trans.steps.mapping.MappingMeta; import org.pentaho.di.trans.steps.missing.MissingTrans; @@ -122,6144 +71,5931 @@ import org.w3c.dom.Document; import org.w3c.dom.Node; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.*; + /** * This class defines information about a transformation and offers methods to save and load it from XML or a PDI * database repository, as well as methods to alter a transformation by adding/removing databases, steps, hops, etc. * - * @since 20-jun-2003 * @author Matt Casters + * @since 20-jun-2003 */ public class TransMeta extends AbstractMeta - implements XMLInterface, Comparator, Comparable, Cloneable, ResourceExportInterface, - RepositoryElementInterface, LoggingObjectInterface { + implements XMLInterface, Comparator, Comparable, Cloneable, ResourceExportInterface, + RepositoryElementInterface, LoggingObjectInterface { - /** The package name, used for internationalization of messages. */ - private static Class PKG = Trans.class; // for i18n purposes, needed by Translator2!! + /** + * The package name, used for internationalization of messages. + */ + private static Class PKG = Trans.class; // for i18n purposes, needed by Translator2!! - /** A constant specifying the tag value for the XML node of the transformation. */ - public static final String XML_TAG = "transformation"; + /** + * A constant specifying the tag value for the XML node of the transformation. + */ + public static final String XML_TAG = "transformation"; - /** - * A constant used by the logging operations to indicate any logged messages are related to transformation meta-data. - */ - public static final String STRING_TRANSMETA = "Transformation metadata"; + /** + * A constant used by the logging operations to indicate any logged messages are related to transformation meta-data. + */ + public static final String STRING_TRANSMETA = "Transformation metadata"; + + /** + * A constant specifying the repository element type as a Transformation. + */ + public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.TRANSFORMATION; - /** A constant specifying the repository element type as a Transformation. */ - public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.TRANSFORMATION; + public static final int BORDER_INDENT = 20; + /** + * The list of steps associated with the transformation. + */ + protected List steps; - public static final int BORDER_INDENT = 20; - /** The list of steps associated with the transformation. */ - protected List steps; + /** + * The list of hops associated with the transformation. 
+ */ + protected List hops; - /** The list of hops associated with the transformation. */ - protected List hops; + /** + * The list of dependencies associated with the transformation. + */ + protected List dependencies; - /** The list of dependencies associated with the transformation. */ - protected List dependencies; + /** + * The list of cluster schemas associated with the transformation. + */ + protected List clusterSchemas; - /** The list of cluster schemas associated with the transformation. */ - protected List clusterSchemas; + /** + * The list of partition schemas associated with the transformation. + */ + private List partitionSchemas; - /** The list of partition schemas associated with the transformation. */ - private List partitionSchemas; + /** + * The version string for the transformation. + */ + protected String trans_version; - /** The version string for the transformation. */ - protected String trans_version; + /** + * The status of the transformation. + */ + protected int trans_status; - /** The status of the transformation. */ - protected int trans_status; + /** + * The transformation logging table associated with the transformation. + */ + protected TransLogTable transLogTable; - /** The transformation logging table associated with the transformation. */ - protected TransLogTable transLogTable; + /** + * The performance logging table associated with the transformation. + */ + protected PerformanceLogTable performanceLogTable; - /** The performance logging table associated with the transformation. */ - protected PerformanceLogTable performanceLogTable; + /** + * The step logging table associated with the transformation. + */ + protected StepLogTable stepLogTable; - /** The step logging table associated with the transformation. */ - protected StepLogTable stepLogTable; + /** + * The metricslogging table associated with the transformation. + */ + protected MetricsLogTable metricsLogTable; - /** The metricslogging table associated with the transformation. */ - protected MetricsLogTable metricsLogTable; + /** + * The size of the current rowset. + */ + protected int sizeRowset; - /** The size of the current rowset. */ - protected int sizeRowset; + /** + * The meta-data for the database connection associated with "max date" auditing information. + */ + protected DatabaseMeta maxDateConnection; - /** The meta-data for the database connection associated with "max date" auditing information. */ - protected DatabaseMeta maxDateConnection; + /** + * The table name associated with "max date" auditing information. + */ + protected String maxDateTable; - /** The table name associated with "max date" auditing information. */ - protected String maxDateTable; + /** + * The field associated with "max date" auditing information. + */ + protected String maxDateField; - /** The field associated with "max date" auditing information. */ - protected String maxDateField; + /** + * The amount by which to increase the "max date" value. + */ + protected double maxDateOffset; - /** The amount by which to increase the "max date" value. */ - protected double maxDateOffset; + /** + * The maximum date difference used for "max date" auditing and limiting job sizes. + */ + protected double maxDateDifference; - /** The maximum date difference used for "max date" auditing and limiting job sizes. */ - protected double maxDateDifference; + /** + * The list of arguments to the transformation. 
+ * + * @deprecated Moved to Trans + */ + @Deprecated + protected String[] arguments; - /** - * The list of arguments to the transformation. - * - * @deprecated Moved to Trans - * */ - @Deprecated - protected String[] arguments; + /** + * A table of named counters. + * + * @deprecated Moved to Trans + */ + @Deprecated + protected Hashtable counters; - /** - * A table of named counters. - * - * @deprecated Moved to Trans - */ - @Deprecated - protected Hashtable counters; + /** + * Indicators for changes in steps, databases, hops, and notes. + */ + protected boolean changed_steps, changed_hops; - /** Indicators for changes in steps, databases, hops, and notes. */ - protected boolean changed_steps, changed_hops; + /** + * The database cache. + */ + protected DBCache dbCache; - /** The database cache. */ - protected DBCache dbCache; + /** + * The time (in nanoseconds) to wait when the input buffer is empty. + */ + protected int sleepTimeEmpty; - /** The time (in nanoseconds) to wait when the input buffer is empty. */ - protected int sleepTimeEmpty; + /** + * The time (in nanoseconds) to wait when the input buffer is full. + */ + protected int sleepTimeFull; - /** The time (in nanoseconds) to wait when the input buffer is full. */ - protected int sleepTimeFull; + /** + * The previous result. + */ + protected Result previousResult; - /** The previous result. */ - protected Result previousResult; + /** + * The result rows. + * + * @deprecated + */ + @Deprecated + protected List resultRows; - /** - * The result rows. - * - * @deprecated - * */ - @Deprecated - protected List resultRows; + /** + * The result files. + * + * @deprecated + */ + @Deprecated + protected List resultFiles; - /** - * The result files. - * - * @deprecated - * */ - @Deprecated - protected List resultFiles; + /** + * Whether the transformation is using unique connections. + */ + protected boolean usingUniqueConnections; + + /** + * Whether the feedback is shown. + */ + protected boolean feedbackShown; - /** Whether the transformation is using unique connections. */ - protected boolean usingUniqueConnections; + /** + * The feedback size. + */ + protected int feedbackSize; - /** Whether the feedback is shown. */ - protected boolean feedbackShown; + /** + * Flag to indicate thread management usage. Set to default to false from version 2.5.0 on. Before that it was enabled + * by default. + */ + protected boolean usingThreadPriorityManagment; - /** The feedback size. */ - protected int feedbackSize; + /** + * The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. + */ + protected SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution; - /** - * Flag to indicate thread management usage. Set to default to false from version 2.5.0 on. Before that it was enabled - * by default. - */ - protected boolean usingThreadPriorityManagment; + /** + * Just a flag indicating that this is a slave transformation - internal use only, no GUI option. + */ + protected boolean slaveTransformation; - /** The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. */ - protected SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution; + /** + * Whether the transformation is capturing step performance snap shots. + */ + protected boolean capturingStepPerformanceSnapShots; - /** Just a flag indicating that this is a slave transformation - internal use only, no GUI option. 
*/ - protected boolean slaveTransformation; + /** + * The step performance capturing delay. + */ + protected long stepPerformanceCapturingDelay; - /** Whether the transformation is capturing step performance snap shots. */ - protected boolean capturingStepPerformanceSnapShots; + /** + * The step performance capturing size limit. + */ + protected String stepPerformanceCapturingSizeLimit; - /** The step performance capturing delay. */ - protected long stepPerformanceCapturingDelay; + /** + * The steps fields cache. + */ + protected Map stepsFieldsCache; - /** The step performance capturing size limit. */ - protected String stepPerformanceCapturingSizeLimit; + /** + * The loop cache. + */ + protected Map loopCache; - /** The steps fields cache. */ - protected Map stepsFieldsCache; + /** + * The log channel interface. + */ + protected LogChannelInterface log; - /** The loop cache. */ - protected Map loopCache; + /** + * The list of StepChangeListeners + */ + protected List stepChangeListeners; - /** The log channel interface. */ - protected LogChannelInterface log; + protected byte[] keyForSessionKey; + boolean isKeyPrivate; + private ArrayList missingTrans; - /** The list of StepChangeListeners */ - protected List stepChangeListeners; + /** + * The TransformationType enum describes the various types of transformations in terms of execution, including Normal, + * Serial Single-Threaded, and Single-Threaded. + */ + public enum TransformationType { + + /** + * A normal transformation. + */ + Normal("Normal", BaseMessages.getString(PKG, "TransMeta.TransformationType.Normal")), + + /** + * A serial single-threaded transformation. + */ + SerialSingleThreaded("SerialSingleThreaded", BaseMessages.getString( + PKG, "TransMeta.TransformationType.SerialSingleThreaded")), + + /** + * A single-threaded transformation. + */ + SingleThreaded("SingleThreaded", BaseMessages + .getString(PKG, "TransMeta.TransformationType.SingleThreaded")); + + /** + * The code corresponding to the transformation type. + */ + private String code; + + /** + * The description of the transformation type. + */ + private String description; + + /** + * Instantiates a new transformation type. + * + * @param code the code + * @param description the description + */ + private TransformationType(String code, String description) { + this.code = code; + this.description = description; + } - protected byte[] keyForSessionKey; - boolean isKeyPrivate; - private ArrayList missingTrans; + /** + * Gets the code corresponding to the transformation type. + * + * @return the code + */ + public String getCode() { + return code; + } - /** - * The TransformationType enum describes the various types of transformations in terms of execution, including Normal, - * Serial Single-Threaded, and Single-Threaded. - */ - public enum TransformationType { + /** + * Gets the description of the transformation type. + * + * @return the description + */ + public String getDescription() { + return description; + } - /** A normal transformation. */ - Normal( "Normal", BaseMessages.getString( PKG, "TransMeta.TransformationType.Normal" ) ), + /** + * Gets the transformation type by code. 
+ * + * @param transTypeCode the trans type code + * @return the transformation type by code + */ + public static TransformationType getTransformationTypeByCode(String transTypeCode) { + if (transTypeCode != null) { + for (TransformationType type : values()) { + if (type.code.equalsIgnoreCase(transTypeCode)) { + return type; + } + } + } + return Normal; + } - /** A serial single-threaded transformation. */ - SerialSingleThreaded( "SerialSingleThreaded", BaseMessages.getString( - PKG, "TransMeta.TransformationType.SerialSingleThreaded" ) ), + /** + * Gets the transformation types descriptions. + * + * @return the transformation types descriptions + */ + public static String[] getTransformationTypesDescriptions() { + String[] desc = new String[values().length]; + for (int i = 0; i < values().length; i++) { + desc[i] = values()[i].getDescription(); + } + return desc; + } + } - /** A single-threaded transformation. */ - SingleThreaded( "SingleThreaded", BaseMessages - .getString( PKG, "TransMeta.TransformationType.SingleThreaded" ) ); + /** + * The transformation type. + */ + protected TransformationType transformationType; - /** The code corresponding to the transformation type. */ - private String code; + // ////////////////////////////////////////////////////////////////////////// - /** The description of the transformation type. */ - private String description; + /** + * A list of localized strings corresponding to string descriptions of the undo/redo actions. + */ + public static final String[] desc_type_undo = { + "", + BaseMessages.getString(PKG, "TransMeta.UndoTypeDesc.UndoChange"), + BaseMessages.getString(PKG, "TransMeta.UndoTypeDesc.UndoNew"), + BaseMessages.getString(PKG, "TransMeta.UndoTypeDesc.UndoDelete"), + BaseMessages.getString(PKG, "TransMeta.UndoTypeDesc.UndoPosition")}; /** - * Instantiates a new transformation type. - * - * @param code - * the code - * @param description - * the description + * A constant specifying the tag value for the XML node of the transformation information. */ - private TransformationType( String code, String description ) { - this.code = code; - this.description = description; - } + protected static final String XML_TAG_INFO = "info"; /** - * Gets the code corresponding to the transformation type. - * - * @return the code + * A constant specifying the tag value for the XML node of the order of steps. */ - public String getCode() { - return code; + public static final String XML_TAG_ORDER = "order"; + + /** + * A constant specifying the tag value for the XML node of the notes. + */ + public static final String XML_TAG_NOTEPADS = "notepads"; + + /** + * A constant specifying the tag value for the XML node of the transformation parameters. + */ + public static final String XML_TAG_PARAMETERS = "parameters"; + + /** + * A constant specifying the tag value for the XML node of the transformation dependencies. + */ + protected static final String XML_TAG_DEPENDENCIES = "dependencies"; + + /** + * A constant specifying the tag value for the XML node of the transformation's partition schemas. + */ + public static final String XML_TAG_PARTITIONSCHEMAS = "partitionschemas"; + + /** + * A constant specifying the tag value for the XML node of the slave servers. + */ + public static final String XML_TAG_SLAVESERVERS = "slaveservers"; + + /** + * A constant specifying the tag value for the XML node of the cluster schemas. 
+ */ + public static final String XML_TAG_CLUSTERSCHEMAS = "clusterschemas"; + + /** + * A constant specifying the tag value for the XML node of the steps' error-handling information. + */ + public static final String XML_TAG_STEP_ERROR_HANDLING = "step_error_handling"; + + /** + * Builds a new empty transformation. The transformation will have default logging capability and no variables, and + * all internal meta-data is cleared to defaults. + */ + public TransMeta() { + clear(); + initializeVariablesFrom(null); } /** - * Gets the description of the transformation type. + * Builds a new empty transformation with a set of variables to inherit from. * - * @return the description + * @param parent the variable space to inherit from */ - public String getDescription() { - return description; + public TransMeta(VariableSpace parent) { + clear(); + initializeVariablesFrom(parent); + } + + public TransMeta(String filename, String name) { + clear(); + setFilename(filename); + this.name = name; + initializeVariablesFrom(null); } /** - * Gets the transformation type by code. + * Constructs a new transformation specifying the filename, name and arguments. * - * @param transTypeCode - * the trans type code - * @return the transformation type by code + * @param filename The filename of the transformation + * @param name The name of the transformation + * @param arguments The arguments as Strings + * @deprecated passing in arguments (a runtime argument) into the metadata is deprecated, pass it to Trans */ - public static TransformationType getTransformationTypeByCode( String transTypeCode ) { - if ( transTypeCode != null ) { - for ( TransformationType type : values() ) { - if ( type.code.equalsIgnoreCase( transTypeCode ) ) { - return type; - } - } - } - return Normal; + @Deprecated + public TransMeta(String filename, String name, String[] arguments) { + clear(); + setFilename(filename); + this.name = name; + this.arguments = arguments; + initializeVariablesFrom(null); } /** - * Gets the transformation types descriptions. + * Compares two transformation on name, filename, repository directory, etc. + * The comparison algorithm is as follows:
+ * <ol>
+ * <li>The first transformation's filename is checked first; if it has none, the transformation comes from a
+ * repository. If the second transformation does not come from a repository, -1 is returned.</li>
+ * <li>If the transformations are both from a repository, the transformations' names are compared. If the first
+ * transformation has no name and the second one does, a -1 is returned.
+ * If the opposite is true, a 1 is returned.</li>
+ * <li>If they both have names they are compared as strings. If the result is non-zero it is returned. Otherwise the
+ * repository directories are compared using the same technique of checking empty values and then performing a string
+ * comparison, returning any non-zero result.</li>
+ * <li>If the names and directories are equal, the object revision strings are compared using the same technique of
+ * checking empty values and then performing a string comparison, this time ultimately returning the result of the
+ * string compare.</li>
+ * <li>If the first transformation does not come from a repository and the second one does, a 1 is returned. Otherwise
+ * the transformation names and filenames are subsequently compared using the same technique of checking empty values
+ * and then performing a string comparison, ultimately returning the result of the filename string comparison.</li>
+ * </ol>
+
* - * @return the transformation types descriptions + * @param t1 the first transformation to compare + * @param t2 the second transformation to compare + * @return 0 if the two transformations are equal, 1 or -1 depending on the values (see description above) */ - public static String[] getTransformationTypesDescriptions() { - String[] desc = new String[values().length]; - for ( int i = 0; i < values().length; i++ ) { - desc[i] = values()[i].getDescription(); - } - return desc; - } - } + public int compare(TransMeta t1, TransMeta t2) { + // If we don't have a filename, the transformation comes from a repository + // + if (Const.isEmpty(t1.getFilename())) { - /** The transformation type. */ - protected TransformationType transformationType; + if (!Const.isEmpty(t2.getFilename())) { + return -1; + } - // ////////////////////////////////////////////////////////////////////////// + // First compare names... + // + if (Const.isEmpty(t1.getName()) && !Const.isEmpty(t2.getName())) { + return -1; + } + if (!Const.isEmpty(t1.getName()) && Const.isEmpty(t2.getName())) { + return 1; + } + int cmpName = t1.getName().compareTo(t2.getName()); + if (cmpName != 0) { + return cmpName; + } - /** A list of localized strings corresponding to string descriptions of the undo/redo actions. */ - public static final String[] desc_type_undo = { - "", - BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoChange" ), - BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoNew" ), - BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoDelete" ), - BaseMessages.getString( PKG, "TransMeta.UndoTypeDesc.UndoPosition" ) }; + // Same name, compare Repository directory... + // + int cmpDirectory = t1.getRepositoryDirectory().getPath().compareTo(t2.getRepositoryDirectory().getPath()); + if (cmpDirectory != 0) { + return cmpDirectory; + } - /** A constant specifying the tag value for the XML node of the transformation information. */ - protected static final String XML_TAG_INFO = "info"; + // Same name, same directory, compare versions + // + if (t1.getObjectRevision() != null && t2.getObjectRevision() == null) { + return 1; + } + if (t1.getObjectRevision() == null && t2.getObjectRevision() != null) { + return -1; + } + if (t1.getObjectRevision() == null && t2.getObjectRevision() == null) { + return 0; + } + return t1.getObjectRevision().getName().compareTo(t2.getObjectRevision().getName()); - /** A constant specifying the tag value for the XML node of the order of steps. */ - public static final String XML_TAG_ORDER = "order"; + } else { + if (Const.isEmpty(t2.getFilename())) { + return 1; + } - /** A constant specifying the tag value for the XML node of the notes. */ - public static final String XML_TAG_NOTEPADS = "notepads"; + // First compare names + // + if (Const.isEmpty(t1.getName()) && !Const.isEmpty(t2.getName())) { + return -1; + } + if (!Const.isEmpty(t1.getName()) && Const.isEmpty(t2.getName())) { + return 1; + } + int cmpName = t1.getName().compareTo(t2.getName()); + if (cmpName != 0) { + return cmpName; + } - /** A constant specifying the tag value for the XML node of the transformation parameters. */ - public static final String XML_TAG_PARAMETERS = "parameters"; + // Same name, compare filenames... + // + return t1.getFilename().compareTo(t2.getFilename()); + } + } - /** A constant specifying the tag value for the XML node of the transformation dependencies. 
*/ - protected static final String XML_TAG_DEPENDENCIES = "dependencies"; + /** + * Compares this transformation's meta-data to the specified transformation's meta-data. This method simply calls + * compare(this, o) + * + * @param o the o + * @return the int + * @see #compare(TransMeta, TransMeta) + * @see java.lang.Comparable#compareTo(java.lang.Object) + */ + public int compareTo(TransMeta o) { + return compare(this, o); + } - /** A constant specifying the tag value for the XML node of the transformation's partition schemas. */ - public static final String XML_TAG_PARTITIONSCHEMAS = "partitionschemas"; + /** + * Checks whether this transformation's meta-data object is equal to the specified object. If the specified object is + * not an instance of TransMeta, false is returned. Otherwise the method returns whether a call to compare() indicates + * equality (i.e. compare(this, (TransMeta)obj)==0). + * + * @param obj the obj + * @return true, if successful + * @see #compare(TransMeta, TransMeta) + * @see java.lang.Object#equals(java.lang.Object) + */ + public boolean equals(Object obj) { + if (!(obj instanceof TransMeta)) { + return false; + } - /** A constant specifying the tag value for the XML node of the slave servers. */ - public static final String XML_TAG_SLAVESERVERS = "slaveservers"; + return compare(this, (TransMeta) obj) == 0; + } - /** A constant specifying the tag value for the XML node of the cluster schemas. */ - public static final String XML_TAG_CLUSTERSCHEMAS = "clusterschemas"; + /** + * Clones the transformation meta-data object. + * + * @return a clone of the transformation meta-data object + * @see java.lang.Object#clone() + */ + @Override + public Object clone() { + return realClone(true); + } - /** A constant specifying the tag value for the XML node of the steps' error-handling information. */ - public static final String XML_TAG_STEP_ERROR_HANDLING = "step_error_handling"; + /** + * Perform a real clone of the transformation meta-data object, including cloning all lists and copying all values. If + * the doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied + * fields will be cleared. + * + * @param doClear Whether to clear all of the clone's data before copying from the source object + * @return a real clone of the calling object + */ + public Object realClone(boolean doClear) { - /** - * Builds a new empty transformation. The transformation will have default logging capability and no variables, and - * all internal meta-data is cleared to defaults. 
- */ - public TransMeta() { - clear(); - initializeVariablesFrom( null ); - } + try { + TransMeta transMeta = (TransMeta) super.clone(); + if (doClear) { + transMeta.clear(); + } else { + // Clear out the things we're replacing below + transMeta.databases = new ArrayList(); + transMeta.steps = new ArrayList(); + transMeta.hops = new ArrayList(); + transMeta.notes = new ArrayList(); + transMeta.dependencies = new ArrayList(); + transMeta.partitionSchemas = new ArrayList(); + transMeta.slaveServers = new ArrayList(); + transMeta.clusterSchemas = new ArrayList(); + transMeta.namedParams = new NamedParamsDefault(); + transMeta.stepChangeListeners = new ArrayList(); + } + for (DatabaseMeta db : databases) { + transMeta.addDatabase((DatabaseMeta) db.clone()); + } + for (StepMeta step : steps) { + transMeta.addStep((StepMeta) step.clone()); + } + for (TransHopMeta hop : hops) { + transMeta.addTransHop((TransHopMeta) hop.clone()); + } + for (NotePadMeta note : notes) { + transMeta.addNote((NotePadMeta) note.clone()); + } + for (TransDependency dep : dependencies) { + transMeta.addDependency((TransDependency) dep.clone()); + } + for (SlaveServer slave : slaveServers) { + transMeta.getSlaveServers().add((SlaveServer) slave.clone()); + } + for (ClusterSchema schema : clusterSchemas) { + transMeta.getClusterSchemas().add(schema.clone()); + } + for (PartitionSchema schema : partitionSchemas) { + transMeta.getPartitionSchemas().add((PartitionSchema) schema.clone()); + } + for (String key : listParameters()) { + transMeta.addParameterDefinition(key, getParameterDefault(key), getParameterDescription(key)); + } - /** - * Builds a new empty transformation with a set of variables to inherit from. - * - * @param parent - * the variable space to inherit from - */ - public TransMeta( VariableSpace parent ) { - clear(); - initializeVariablesFrom( parent ); - } - - public TransMeta( String filename, String name ) { - clear(); - setFilename( filename ); - this.name = name; - initializeVariablesFrom( null ); - } - - /** - * Constructs a new transformation specifying the filename, name and arguments. - * - * @param filename - * The filename of the transformation - * @param name - * The name of the transformation - * @param arguments - * The arguments as Strings - * @deprecated passing in arguments (a runtime argument) into the metadata is deprecated, pass it to Trans - */ - @Deprecated - public TransMeta( String filename, String name, String[] arguments ) { - clear(); - setFilename( filename ); - this.name = name; - this.arguments = arguments; - initializeVariablesFrom( null ); - } - - /** - * Compares two transformation on name, filename, repository directory, etc. - * The comparison algorithm is as follows:
- *
- * 1. The first transformation's filename is checked first; if it has none, the transformation comes from a
- *    repository. If the second transformation does not come from a repository, -1 is returned.
- * 2. If the transformations are both from a repository, the transformations' names are compared. If the first
- *    transformation has no name and the second one does, a -1 is returned. If the opposite is true, a 1 is returned.
- * 3. If they both have names they are compared as strings. If the result is non-zero it is returned. Otherwise the
- *    repository directories are compared using the same technique of checking empty values and then performing a
- *    string comparison, returning any non-zero result.
- * 4. If the names and directories are equal, the object revision strings are compared using the same technique of
- *    checking empty values and then performing a string comparison, this time ultimately returning the result of the
- *    string compare.
- * 5. If the first transformation does not come from a repository and the second one does, a 1 is returned. Otherwise
- *    the transformation names and filenames are subsequently compared using the same technique of checking empty
- *    values and then performing a string comparison, ultimately returning the result of the filename string
- *    comparison.
- * - * @param t1 - * the first transformation to compare - * @param t2 - * the second transformation to compare - * @return 0 if the two transformations are equal, 1 or -1 depending on the values (see description above) - * - */ - public int compare( TransMeta t1, TransMeta t2 ) { - // If we don't have a filename, the transformation comes from a repository - // - if ( Const.isEmpty( t1.getFilename() ) ) { - - if ( !Const.isEmpty( t2.getFilename() ) ) { - return -1; - } - - // First compare names... - // - if ( Const.isEmpty( t1.getName() ) && !Const.isEmpty( t2.getName() ) ) { - return -1; - } - if ( !Const.isEmpty( t1.getName() ) && Const.isEmpty( t2.getName() ) ) { - return 1; - } - int cmpName = t1.getName().compareTo( t2.getName() ); - if ( cmpName != 0 ) { - return cmpName; - } - - // Same name, compare Repository directory... - // - int cmpDirectory = t1.getRepositoryDirectory().getPath().compareTo( t2.getRepositoryDirectory().getPath() ); - if ( cmpDirectory != 0 ) { - return cmpDirectory; - } - - // Same name, same directory, compare versions - // - if ( t1.getObjectRevision() != null && t2.getObjectRevision() == null ) { - return 1; - } - if ( t1.getObjectRevision() == null && t2.getObjectRevision() != null ) { - return -1; - } - if ( t1.getObjectRevision() == null && t2.getObjectRevision() == null ) { - return 0; - } - return t1.getObjectRevision().getName().compareTo( t2.getObjectRevision().getName() ); - - } else { - if ( Const.isEmpty( t2.getFilename() ) ) { - return 1; - } - - // First compare names - // - if ( Const.isEmpty( t1.getName() ) && !Const.isEmpty( t2.getName() ) ) { - return -1; - } - if ( !Const.isEmpty( t1.getName() ) && Const.isEmpty( t2.getName() ) ) { - return 1; - } - int cmpName = t1.getName().compareTo( t2.getName() ); - if ( cmpName != 0 ) { - return cmpName; - } - - // Same name, compare filenames... - // - return t1.getFilename().compareTo( t2.getFilename() ); - } - } - - /** - * Compares this transformation's meta-data to the specified transformation's meta-data. This method simply calls - * compare(this, o) - * - * @param o - * the o - * @return the int - * @see #compare(TransMeta, TransMeta) - * @see java.lang.Comparable#compareTo(java.lang.Object) - */ - public int compareTo( TransMeta o ) { - return compare( this, o ); - } - - /** - * Checks whether this transformation's meta-data object is equal to the specified object. If the specified object is - * not an instance of TransMeta, false is returned. Otherwise the method returns whether a call to compare() indicates - * equality (i.e. compare(this, (TransMeta)obj)==0). - * - * @param obj - * the obj - * @return true, if successful - * @see #compare(TransMeta, TransMeta) - * @see java.lang.Object#equals(java.lang.Object) - */ - public boolean equals( Object obj ) { - if ( !( obj instanceof TransMeta ) ) { - return false; + return transMeta; + } catch (Exception e) { + e.printStackTrace(); + return null; + } } - return compare( this, (TransMeta) obj ) == 0; - } + /** + * Clears the transformation's meta-data, including the lists of databases, steps, hops, notes, dependencies, + * partition schemas, slave servers, and cluster schemas. Logging information and timeouts are reset to defaults, and + * recent connection info is cleared. 
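To make the clone()/realClone(doClear) distinction above concrete: clone() always calls realClone(true), which clear()s the copy before re-populating every list, while realClone(false) only resets the collections it is about to refill and keeps the other fields copied by Object.clone(). A small sketch, assuming a TransMeta instance obtained elsewhere (hypothetical) and the upstream org.pentaho.di.trans package; illustrative only, not part of the patch.

    import org.pentaho.di.trans.TransMeta;

    public class TransMetaCloneSketch {
        // transMeta is assumed to have been loaded elsewhere, e.g. from a .ktr file.
        static void cloneExamples(TransMeta transMeta) {
            // Full deep copy: the copy is cleared first, then databases, steps, hops,
            // notes, dependencies, schemas and parameters are cloned one by one.
            TransMeta full = (TransMeta) transMeta.clone();

            // Lighter variant: only the lists about to be refilled are reset; scalar
            // fields keep whatever Object.clone() copied over.
            TransMeta partial = (TransMeta) transMeta.realClone(false);

            // The copies are independent of the original.
            System.out.println(full.nrSteps() + " / " + partial.nrSteps() + " / " + transMeta.nrSteps());
        }
    }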
+ */ + @Override + public void clear() { + setObjectId(null); + steps = new ArrayList(); + hops = new ArrayList(); + dependencies = new ArrayList(); + partitionSchemas = new ArrayList(); + clusterSchemas = new ArrayList(); + stepChangeListeners = new ArrayList(); - /** - * Clones the transformation meta-data object. - * - * @return a clone of the transformation meta-data object - * @see java.lang.Object#clone() - */ - @Override - public Object clone() { - return realClone( true ); - } - - /** - * Perform a real clone of the transformation meta-data object, including cloning all lists and copying all values. If - * the doClear parameter is true, the clone will be cleared of ALL values before the copy. If false, only the copied - * fields will be cleared. - * - * @param doClear - * Whether to clear all of the clone's data before copying from the source object - * @return a real clone of the calling object - */ - public Object realClone( boolean doClear ) { - - try { - TransMeta transMeta = (TransMeta) super.clone(); - if ( doClear ) { - transMeta.clear(); - } else { - // Clear out the things we're replacing below - transMeta.databases = new ArrayList(); - transMeta.steps = new ArrayList(); - transMeta.hops = new ArrayList(); - transMeta.notes = new ArrayList(); - transMeta.dependencies = new ArrayList(); - transMeta.partitionSchemas = new ArrayList(); - transMeta.slaveServers = new ArrayList(); - transMeta.clusterSchemas = new ArrayList(); - transMeta.namedParams = new NamedParamsDefault(); - transMeta.stepChangeListeners = new ArrayList(); - } - for ( DatabaseMeta db : databases ) { - transMeta.addDatabase( (DatabaseMeta) db.clone() ); - } - for ( StepMeta step : steps ) { - transMeta.addStep( (StepMeta) step.clone() ); - } - for ( TransHopMeta hop : hops ) { - transMeta.addTransHop( (TransHopMeta) hop.clone() ); - } - for ( NotePadMeta note : notes ) { - transMeta.addNote( (NotePadMeta) note.clone() ); - } - for ( TransDependency dep : dependencies ) { - transMeta.addDependency( (TransDependency) dep.clone() ); - } - for ( SlaveServer slave : slaveServers ) { - transMeta.getSlaveServers().add( (SlaveServer) slave.clone() ); - } - for ( ClusterSchema schema : clusterSchemas ) { - transMeta.getClusterSchemas().add( schema.clone() ); - } - for ( PartitionSchema schema : partitionSchemas ) { - transMeta.getPartitionSchemas().add( (PartitionSchema) schema.clone() ); - } - for ( String key : listParameters() ) { - transMeta.addParameterDefinition( key, getParameterDefault( key ), getParameterDescription( key ) ); - } - - return transMeta; - } catch ( Exception e ) { - e.printStackTrace(); - return null; - } - } - - /** - * Clears the transformation's meta-data, including the lists of databases, steps, hops, notes, dependencies, - * partition schemas, slave servers, and cluster schemas. Logging information and timeouts are reset to defaults, and - * recent connection info is cleared. 
- */ - @Override - public void clear() { - setObjectId( null ); - steps = new ArrayList(); - hops = new ArrayList(); - dependencies = new ArrayList(); - partitionSchemas = new ArrayList(); - clusterSchemas = new ArrayList(); - stepChangeListeners = new ArrayList(); + slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); - slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); + trans_status = -1; + trans_version = null; - trans_status = -1; - trans_version = null; + transLogTable = TransLogTable.getDefault(this, this, steps); + performanceLogTable = PerformanceLogTable.getDefault(this, this); + stepLogTable = StepLogTable.getDefault(this, this); + metricsLogTable = MetricsLogTable.getDefault(this, this); - transLogTable = TransLogTable.getDefault( this, this, steps ); - performanceLogTable = PerformanceLogTable.getDefault( this, this ); - stepLogTable = StepLogTable.getDefault( this, this ); - metricsLogTable = MetricsLogTable.getDefault( this, this ); + sizeRowset = Const.ROWS_IN_ROWSET; + sleepTimeEmpty = Const.TIMEOUT_GET_MILLIS; + sleepTimeFull = Const.TIMEOUT_PUT_MILLIS; - sizeRowset = Const.ROWS_IN_ROWSET; - sleepTimeEmpty = Const.TIMEOUT_GET_MILLIS; - sleepTimeFull = Const.TIMEOUT_PUT_MILLIS; + maxDateConnection = null; + maxDateTable = null; + maxDateField = null; + maxDateOffset = 0.0; - maxDateConnection = null; - maxDateTable = null; - maxDateField = null; - maxDateOffset = 0.0; + maxDateDifference = 0.0; - maxDateDifference = 0.0; + undo = new ArrayList(); + max_undo = Const.MAX_UNDO; + undo_position = -1; - undo = new ArrayList(); - max_undo = Const.MAX_UNDO; - undo_position = -1; + counters = new Hashtable(); + resultRows = null; - counters = new Hashtable(); - resultRows = null; + super.clear(); - super.clear(); + // LOAD THE DATABASE CACHE! + dbCache = DBCache.getInstance(); - // LOAD THE DATABASE CACHE! - dbCache = DBCache.getInstance(); + resultRows = new ArrayList(); + resultFiles = new ArrayList(); - resultRows = new ArrayList(); - resultFiles = new ArrayList(); + feedbackShown = true; + feedbackSize = Const.ROWS_UPDATE; - feedbackShown = true; - feedbackSize = Const.ROWS_UPDATE; + // Thread priority: + // - set to false in version 2.5.0 + // - re-enabling in version 3.0.1 to prevent excessive locking (PDI-491) + // + usingThreadPriorityManagment = true; - // Thread priority: - // - set to false in version 2.5.0 - // - re-enabling in version 3.0.1 to prevent excessive locking (PDI-491) - // - usingThreadPriorityManagment = true; + // The performance monitoring options + // + capturingStepPerformanceSnapShots = false; + stepPerformanceCapturingDelay = 1000; // every 1 seconds + stepPerformanceCapturingSizeLimit = "100"; // maximum 100 data points - // The performance monitoring options - // - capturingStepPerformanceSnapShots = false; - stepPerformanceCapturingDelay = 1000; // every 1 seconds - stepPerformanceCapturingSizeLimit = "100"; // maximum 100 data points + stepsFieldsCache = new HashMap(); + loopCache = new HashMap(); + transformationType = TransformationType.Normal; - stepsFieldsCache = new HashMap(); - loopCache = new HashMap(); - transformationType = TransformationType.Normal; + log = LogChannel.GENERAL; + } - log = LogChannel.GENERAL; - } + /** + * Add a new step to the transformation. Also marks that the transformation's steps have changed. + * + * @param stepMeta The meta-data for the step to be added. 
+ */ + public void addStep(StepMeta stepMeta) { + steps.add(stepMeta); + stepMeta.setParentTransMeta(this); + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if (iface instanceof StepMetaChangeListenerInterface) { + addStepChangeListener((StepMetaChangeListenerInterface) iface); + } + changed_steps = true; + } - /** - * Add a new step to the transformation. Also marks that the transformation's steps have changed. - * - * @param stepMeta - * The meta-data for the step to be added. - */ - public void addStep( StepMeta stepMeta ) { - steps.add( stepMeta ); - stepMeta.setParentTransMeta( this ); - StepMetaInterface iface = stepMeta.getStepMetaInterface(); - if ( iface instanceof StepMetaChangeListenerInterface ) { - addStepChangeListener( (StepMetaChangeListenerInterface) iface ); - } - changed_steps = true; - } - - /** - * Add a new step to the transformation if that step didn't exist yet. Otherwise, replace the step. This method also - * marks that the transformation's steps have changed. - * - * @param stepMeta - * The meta-data for the step to be added. - */ - public void addOrReplaceStep( StepMeta stepMeta ) { - int index = steps.indexOf( stepMeta ); - if ( index < 0 ) { - index = steps.add( stepMeta ) ? 0 : index; - } else { - StepMeta previous = getStep( index ); - previous.replaceMeta( stepMeta ); - } - stepMeta.setParentTransMeta( this ); - StepMetaInterface iface = stepMeta.getStepMetaInterface(); - if ( index != -1 && iface instanceof StepMetaChangeListenerInterface ) { - addStepChangeListener( index, (StepMetaChangeListenerInterface) iface ); - } - changed_steps = true; - } - - /** - * Add a new hop to the transformation. The hop information (source and target steps, e.g.) should be configured in - * the TransHopMeta object before calling addTransHop(). Also marks that the transformation's hops have changed. - * - * @param hi - * The hop meta-data to be added. - */ - public void addTransHop( TransHopMeta hi ) { - hops.add( hi ); - changed_hops = true; - } + /** + * Add a new step to the transformation if that step didn't exist yet. Otherwise, replace the step. This method also + * marks that the transformation's steps have changed. + * + * @param stepMeta The meta-data for the step to be added. + */ + public void addOrReplaceStep(StepMeta stepMeta) { + int index = steps.indexOf(stepMeta); + if (index < 0) { + index = steps.add(stepMeta) ? 0 : index; + } else { + StepMeta previous = getStep(index); + previous.replaceMeta(stepMeta); + } + stepMeta.setParentTransMeta(this); + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if (index != -1 && iface instanceof StepMetaChangeListenerInterface) { + addStepChangeListener(index, (StepMetaChangeListenerInterface) iface); + } + changed_steps = true; + } - /** - * Add a new dependency to the transformation. - * - * @param td - * The transformation dependency to be added. - */ - public void addDependency( TransDependency td ) { - dependencies.add( td ); - } + /** + * Add a new hop to the transformation. The hop information (source and target steps, e.g.) should be configured in + * the TransHopMeta object before calling addTransHop(). Also marks that the transformation's hops have changed. + * + * @param hi The hop meta-data to be added. + */ + public void addTransHop(TransHopMeta hi) { + hops.add(hi); + changed_hops = true; + } - /** - * Add a new step to the transformation at the specified index. 
This method sets the step's parent transformation to - * the this transformation, and marks that the transformations' steps have changed. - * - * @param p - * The index into the step list - * @param stepMeta - * The step to be added. - */ - public void addStep( int p, StepMeta stepMeta ) { - StepMetaInterface iface = stepMeta.getStepMetaInterface(); - if ( iface instanceof StepMetaChangeListenerInterface ) { - addStepChangeListener( p, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface() ); - } - steps.add( p, stepMeta ); - stepMeta.setParentTransMeta( this ); - changed_steps = true; - } - - /** - * Add a new hop to the transformation on a certain location (i.e. the specified index). Also marks that the - * transformation's hops have changed. - * - * @param p - * the index into the hop list - * @param hi - * The hop to be added. - */ - public void addTransHop( int p, TransHopMeta hi ) { - try { - hops.add( p, hi ); - } catch ( IndexOutOfBoundsException e ) { - hops.add( hi ); + /** + * Add a new dependency to the transformation. + * + * @param td The transformation dependency to be added. + */ + public void addDependency(TransDependency td) { + dependencies.add(td); } - changed_hops = true; - } - - /** - * Add a new dependency to the transformation on a certain location (i.e. the specified index). - * - * @param p - * The index into the dependencies list. - * @param td - * The transformation dependency to be added. - */ - public void addDependency( int p, TransDependency td ) { - dependencies.add( p, td ); - } - - /** - * Get a list of defined steps in this transformation. - * - * @return an ArrayList of defined steps. - */ - public List getSteps() { - return steps; - } - - /** - * Retrieves a step on a certain location (i.e. the specified index). - * - * @param i - * The index into the steps list. - * @return The desired step's meta-data. - */ - public StepMeta getStep( int i ) { - return steps.get( i ); - } - /** - * Retrieves a hop on a certain location (i.e. the specified index). - * - * @param i - * The index into the hops list. - * @return The desired hop's meta-data. - */ - public TransHopMeta getTransHop( int i ) { - return hops.get( i ); - } - - /** - * Retrieves a dependency on a certain location (i.e. the specified index). - * - * @param i - * The index into the dependencies list. - * @return The dependency object. - */ - public TransDependency getDependency( int i ) { - return dependencies.get( i ); - } - - /** - * Removes a step from the transformation on a certain location (i.e. the specified index). Also marks that the - * transformation's steps have changed. - * - * @param i - * The index - */ - public void removeStep( int i ) { - if ( i < 0 || i >= steps.size() ) { - return; + /** + * Add a new step to the transformation at the specified index. This method sets the step's parent transformation to + * the this transformation, and marks that the transformations' steps have changed. + * + * @param p The index into the step list + * @param stepMeta The step to be added. 
+ */ + public void addStep(int p, StepMeta stepMeta) { + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if (iface instanceof StepMetaChangeListenerInterface) { + addStepChangeListener(p, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface()); + } + steps.add(p, stepMeta); + stepMeta.setParentTransMeta(this); + changed_steps = true; } - StepMeta removeStep = steps.get( i ); - StepMetaInterface iface = removeStep.getStepMetaInterface(); - if ( iface instanceof StepMetaChangeListenerInterface ) { - removeStepChangeListener( (StepMetaChangeListenerInterface) iface ); + /** + * Add a new hop to the transformation on a certain location (i.e. the specified index). Also marks that the + * transformation's hops have changed. + * + * @param p the index into the hop list + * @param hi The hop to be added. + */ + public void addTransHop(int p, TransHopMeta hi) { + try { + hops.add(p, hi); + } catch (IndexOutOfBoundsException e) { + hops.add(hi); + } + changed_hops = true; } - steps.remove( i ); - - if ( removeStep.getStepMetaInterface() instanceof MissingTrans ) { - removeMissingTrans( (MissingTrans) removeStep.getStepMetaInterface() ); + /** + * Add a new dependency to the transformation on a certain location (i.e. the specified index). + * + * @param p The index into the dependencies list. + * @param td The transformation dependency to be added. + */ + public void addDependency(int p, TransDependency td) { + dependencies.add(p, td); } - changed_steps = true; - } - - /** - * Removes a hop from the transformation on a certain location (i.e. the specified index). Also marks that the - * transformation's hops have changed. - * - * @param i - * The index into the hops list - */ - public void removeTransHop( int i ) { - if ( i < 0 || i >= hops.size() ) { - return; + /** + * Get a list of defined steps in this transformation. + * + * @return an ArrayList of defined steps. + */ + public List getSteps() { + return steps; } - hops.remove( i ); - changed_hops = true; - } - - /** - * Removes a hop from the transformation. Also marks that the - * transformation's hops have changed. - * - * @param hop - * The hop to remove from the list of hops - */ - public void removeTransHop( TransHopMeta hop ) { - hops.remove( hop ); - changed_hops = true; - } - - /** - * Removes a dependency from the transformation on a certain location (i.e. the specified index). - * - * @param i - * The location - */ - public void removeDependency( int i ) { - if ( i < 0 || i >= dependencies.size() ) { - return; + /** + * Retrieves a step on a certain location (i.e. the specified index). + * + * @param i The index into the steps list. + * @return The desired step's meta-data. + */ + public StepMeta getStep(int i) { + return steps.get(i); } - dependencies.remove( i ); - } - /** - * Clears all the dependencies from the transformation. - */ - public void removeAllDependencies() { - dependencies.clear(); - } + /** + * Retrieves a hop on a certain location (i.e. the specified index). + * + * @param i The index into the hops list. + * @return The desired hop's meta-data. + */ + public TransHopMeta getTransHop(int i) { + return hops.get(i); + } - /** - * Gets the number of steps in the transformation. - * - * @return The number of steps in the transformation. - */ - public int nrSteps() { - return steps.size(); - } + /** + * Retrieves a dependency on a certain location (i.e. the specified index). + * + * @param i The index into the dependencies list. + * @return The dependency object. 
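addStep(), addTransHop() and their indexed variants above are the primitives used to assemble a transformation graph in memory. A minimal sketch follows; the DummyTransMeta placeholder step type, the StepMeta(String, StepMetaInterface) and TransHopMeta(StepMeta, StepMeta) constructors and KettleEnvironment.init() are assumptions drawn from the wider Kettle API, not from this patch.

    import org.pentaho.di.core.KettleEnvironment;
    import org.pentaho.di.trans.TransHopMeta;
    import org.pentaho.di.trans.TransMeta;
    import org.pentaho.di.trans.step.StepMeta;
    import org.pentaho.di.trans.steps.dummytrans.DummyTransMeta;

    public class BuildTransSketch {
        public static void main(String[] args) throws Exception {
            KettleEnvironment.init();

            TransMeta transMeta = new TransMeta();

            // Two placeholder steps; DummyTransMeta simply passes rows through.
            StepMeta input = new StepMeta("Generate", new DummyTransMeta());
            StepMeta output = new StepMeta("Collect", new DummyTransMeta());

            transMeta.addStep(input);   // sets the parent and registers change listeners
            transMeta.addStep(output);
            transMeta.addTransHop(new TransHopMeta(input, output));

            System.out.println(transMeta.nrSteps() + " steps, " + transMeta.nrTransHops() + " hop(s)");
        }
    }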
+ */ + public TransDependency getDependency(int i) { + return dependencies.get(i); + } - /** - * Gets the number of hops in the transformation. - * - * @return The number of hops in the transformation. - */ - public int nrTransHops() { - return hops.size(); - } + /** + * Removes a step from the transformation on a certain location (i.e. the specified index). Also marks that the + * transformation's steps have changed. + * + * @param i The index + */ + public void removeStep(int i) { + if (i < 0 || i >= steps.size()) { + return; + } - /** - * Gets the number of dependencies in the transformation. - * - * @return The number of dependencies in the transformation. - */ - public int nrDependencies() { - return dependencies.size(); - } + StepMeta removeStep = steps.get(i); + StepMetaInterface iface = removeStep.getStepMetaInterface(); + if (iface instanceof StepMetaChangeListenerInterface) { + removeStepChangeListener((StepMetaChangeListenerInterface) iface); + } - /** - * Gets the number of stepChangeListeners in the transformation. - * - * @return The number of stepChangeListeners in the transformation. - */ - public int nrStepChangeListeners() { - return stepChangeListeners.size(); - } - - /** - * Changes the content of a step on a certain position. This is accomplished by setting the step's metadata at the - * specified index to the specified meta-data object. The new step's parent transformation is updated to be this - * transformation. - * - * @param i - * The index into the steps list - * @param stepMeta - * The step meta-data to set - */ - public void setStep( int i, StepMeta stepMeta ) { - StepMetaInterface iface = stepMeta.getStepMetaInterface(); - if ( iface instanceof StepMetaChangeListenerInterface ) { - addStepChangeListener( i, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface() ); - } - steps.set( i, stepMeta ); - stepMeta.setParentTransMeta( this ); - } - - /** - * Changes the content of a hop on a certain position. This is accomplished by setting the hop's metadata at the - * specified index to the specified meta-data object. - * - * @param i - * The index into the hops list - * @param hi - * The hop meta-data to set - */ - public void setTransHop( int i, TransHopMeta hi ) { - hops.set( i, hi ); - } + steps.remove(i); - /** - * Gets the list of used steps, which are the steps that are connected by hops. - * - * @return a list with all the used steps - */ - public List getUsedSteps() { - List list = new ArrayList(); + if (removeStep.getStepMetaInterface() instanceof MissingTrans) { + removeMissingTrans((MissingTrans) removeStep.getStepMetaInterface()); + } - for ( StepMeta stepMeta : steps ) { - if ( isStepUsedInTransHops( stepMeta ) ) { - list.add( stepMeta ); - } + changed_steps = true; } - return list; - } + /** + * Removes a hop from the transformation on a certain location (i.e. the specified index). Also marks that the + * transformation's hops have changed. + * + * @param i The index into the hops list + */ + public void removeTransHop(int i) { + if (i < 0 || i >= hops.size()) { + return; + } - /** - * Searches the list of steps for a step with a certain name. - * - * @param name - * The name of the step to look for - * @return The step information or null if no nothing was found. - */ - public StepMeta findStep( String name ) { - return findStep( name, null ); - } + hops.remove(i); + changed_hops = true; + } - /** - * Searches the list of steps for a step with a certain name while excluding one step. 
- * - * @param name - * The name of the step to look for - * @param exclude - * The step information to exclude. - * @return The step information or null if nothing was found. - */ - public StepMeta findStep( String name, StepMeta exclude ) { - if ( name == null ) { - return null; + /** + * Removes a hop from the transformation. Also marks that the + * transformation's hops have changed. + * + * @param hop The hop to remove from the list of hops + */ + public void removeTransHop(TransHopMeta hop) { + hops.remove(hop); + changed_hops = true; } - int excl = -1; - if ( exclude != null ) { - excl = indexOfStep( exclude ); + /** + * Removes a dependency from the transformation on a certain location (i.e. the specified index). + * + * @param i The location + */ + public void removeDependency(int i) { + if (i < 0 || i >= dependencies.size()) { + return; + } + dependencies.remove(i); } - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - if ( i != excl && stepMeta.getName().equalsIgnoreCase( name ) ) { - return stepMeta; - } + /** + * Clears all the dependencies from the transformation. + */ + public void removeAllDependencies() { + dependencies.clear(); } - return null; - } - /** - * Searches the list of hops for a hop with a certain name. - * - * @param name - * The name of the hop to look for - * @return The hop information or null if nothing was found. - */ - public TransHopMeta findTransHop( String name ) { - int i; + /** + * Gets the number of steps in the transformation. + * + * @return The number of steps in the transformation. + */ + public int nrSteps() { + return steps.size(); + } - for ( i = 0; i < nrTransHops(); i++ ) { - TransHopMeta hi = getTransHop( i ); - if ( hi.toString().equalsIgnoreCase( name ) ) { - return hi; - } + /** + * Gets the number of hops in the transformation. + * + * @return The number of hops in the transformation. + */ + public int nrTransHops() { + return hops.size(); } - return null; - } - /** - * Search all hops for a hop where a certain step is at the start. - * - * @param fromstep - * The step at the start of the hop. - * @return The hop or null if no hop was found. - */ - public TransHopMeta findTransHopFrom( StepMeta fromstep ) { - int i; - for ( i = 0; i < nrTransHops(); i++ ) { - TransHopMeta hi = getTransHop( i ); - if ( hi.getFromStep() != null && hi.getFromStep().equals( fromstep ) ) // return the first - { - return hi; - } - } - return null; - } - - /** - * Find a certain hop in the transformation. - * - * @param hi - * The hop information to look for. - * @return The hop or null if no hop was found. - */ - public TransHopMeta findTransHop( TransHopMeta hi ) { - return findTransHop( hi.getFromStep(), hi.getToStep() ); - } + /** + * Gets the number of dependencies in the transformation. + * + * @return The number of dependencies in the transformation. + */ + public int nrDependencies() { + return dependencies.size(); + } - /** - * Search all hops for a hop where a certain step is at the start and another is at the end. - * - * @param from - * The step at the start of the hop. - * @param to - * The step at the end of the hop. - * @return The hop or null if no hop was found. - */ - public TransHopMeta findTransHop( StepMeta from, StepMeta to ) { - return findTransHop( from, to, false ); - } + /** + * Gets the number of stepChangeListeners in the transformation. + * + * @return The number of stepChangeListeners in the transformation. 
+ */ + public int nrStepChangeListeners() { + return stepChangeListeners.size(); + } - /** - * Search all hops for a hop where a certain step is at the start and another is at the end. - * - * @param from - * The step at the start of the hop. - * @param to - * The step at the end of the hop. - * @param disabledToo - * the disabled too - * @return The hop or null if no hop was found. - */ - public TransHopMeta findTransHop( StepMeta from, StepMeta to, boolean disabledToo ) { - for ( int i = 0; i < nrTransHops(); i++ ) { - TransHopMeta hi = getTransHop( i ); - if ( hi.isEnabled() || disabledToo ) { - if ( hi.getFromStep() != null && hi.getToStep() != null && hi.getFromStep().equals( from ) && hi.getToStep() - .equals( to ) ) { - return hi; + /** + * Changes the content of a step on a certain position. This is accomplished by setting the step's metadata at the + * specified index to the specified meta-data object. The new step's parent transformation is updated to be this + * transformation. + * + * @param i The index into the steps list + * @param stepMeta The step meta-data to set + */ + public void setStep(int i, StepMeta stepMeta) { + StepMetaInterface iface = stepMeta.getStepMetaInterface(); + if (iface instanceof StepMetaChangeListenerInterface) { + addStepChangeListener(i, (StepMetaChangeListenerInterface) stepMeta.getStepMetaInterface()); } - } + steps.set(i, stepMeta); + stepMeta.setParentTransMeta(this); } - return null; - } - /** - * Search all hops for a hop where a certain step is at the end. - * - * @param tostep - * The step at the end of the hop. - * @return The hop or null if no hop was found. - */ - public TransHopMeta findTransHopTo( StepMeta tostep ) { - int i; - for ( i = 0; i < nrTransHops(); i++ ) { - TransHopMeta hi = getTransHop( i ); - if ( hi.getToStep() != null && hi.getToStep().equals( tostep ) ) // Return the first! - { - return hi; - } - } - return null; - } - - /** - * Determines whether or not a certain step is informative. This means that the previous step is sending information - * to this step, but only informative. This means that this step is using the information to process the actual stream - * of data. We use this in StreamLookup, TableInput and other types of steps. - * - * @param this_step - * The step that is receiving information. - * @param prev_step - * The step that is sending information - * @return true if prev_step if informative for this_step. - */ - public boolean isStepInformative( StepMeta this_step, StepMeta prev_step ) { - String[] infoSteps = this_step.getStepMetaInterface().getStepIOMeta().getInfoStepnames(); - if ( infoSteps == null ) { - return false; - } - for ( int i = 0; i < infoSteps.length; i++ ) { - if ( prev_step.getName().equalsIgnoreCase( infoSteps[i] ) ) { - return true; - } + /** + * Changes the content of a hop on a certain position. This is accomplished by setting the hop's metadata at the + * specified index to the specified meta-data object. + * + * @param i The index into the hops list + * @param hi The hop meta-data to set + */ + public void setTransHop(int i, TransHopMeta hi) { + hops.set(i, hi); } - return false; - } - - /** - * Counts the number of previous steps for a step name. - * - * @param stepname - * The name of the step to start from - * @return The number of preceding steps. 
- * @deprecated - */ - @Deprecated - public int findNrPrevSteps( String stepname ) { - return findNrPrevSteps( findStep( stepname ), false ); - } - - /** - * Counts the number of previous steps for a step name taking into account whether or not they are informational. - * - * @param stepname - * The name of the step to start from - * @param info - * true if only the informational steps are desired, false otherwise - * @return The number of preceding steps. - * @deprecated - */ - @Deprecated - public int findNrPrevSteps( String stepname, boolean info ) { - return findNrPrevSteps( findStep( stepname ), info ); - } - - /** - * Find the number of steps that precede the indicated step. - * - * @param stepMeta - * The source step - * - * @return The number of preceding steps found. - */ - public int findNrPrevSteps( StepMeta stepMeta ) { - return findNrPrevSteps( stepMeta, false ); - } + /** + * Gets the list of used steps, which are the steps that are connected by hops. + * + * @return a list with all the used steps + */ + public List getUsedSteps() { + List list = new ArrayList(); - /** - * Find the previous step on a certain location (i.e. the specified index). - * - * @param stepname - * The source step name - * @param nr - * the index into the step list - * - * @return The preceding step found. - * @deprecated - */ - @Deprecated - public StepMeta findPrevStep( String stepname, int nr ) { - return findPrevStep( findStep( stepname ), nr ); - } + for (StepMeta stepMeta : steps) { + if (isStepUsedInTransHops(stepMeta)) { + list.add(stepMeta); + } + } - /** - * Find the previous step on a certain location taking into account the steps being informational or not. - * - * @param stepname - * The name of the step - * @param nr - * The index into the step list - * @param info - * true if only the informational steps are desired, false otherwise - * @return The step information - * @deprecated - */ - @Deprecated - public StepMeta findPrevStep( String stepname, int nr, boolean info ) { - return findPrevStep( findStep( stepname ), nr, info ); - } + return list; + } - /** - * Find the previous step on a certain location (i.e. the specified index). - * - * @param stepMeta - * The source step information - * @param nr - * the index into the hops list - * - * @return The preceding step found. - */ - public StepMeta findPrevStep( StepMeta stepMeta, int nr ) { - return findPrevStep( stepMeta, nr, false ); - } + /** + * Searches the list of steps for a step with a certain name. + * + * @param name The name of the step to look for + * @return The step information or null if no nothing was found. + */ + public StepMeta findStep(String name) { + return findStep(name, null); + } - /** - * Count the number of previous steps on a certain location taking into account the steps being informational or not. - * - * @param stepMeta - * The name of the step - * @param info - * true if only the informational steps are desired, false otherwise - * @return The number of preceding steps - * @deprecated please use method findPreviousSteps - */ - @Deprecated - public int findNrPrevSteps( StepMeta stepMeta, boolean info ) { - int count = 0; - int i; - - for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - TransHopMeta hi = getTransHop( i ); - if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { - // Check if this previous step isn't informative (StreamValueLookup) - // We don't want fields from this stream to show up! 
- if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) { - count++; - } - } - } - return count; - } - - /** - * Find the previous step on a certain location taking into account the steps being informational or not. - * - * @param stepMeta - * The step - * @param nr - * The index into the hops list - * @param info - * true if we only want the informational steps. - * @return The preceding step information - * @deprecated please use method findPreviousSteps - */ - @Deprecated - public StepMeta findPrevStep( StepMeta stepMeta, int nr, boolean info ) { - int count = 0; - int i; + /** + * Searches the list of steps for a step with a certain name while excluding one step. + * + * @param name The name of the step to look for + * @param exclude The step information to exclude. + * @return The step information or null if nothing was found. + */ + public StepMeta findStep(String name, StepMeta exclude) { + if (name == null) { + return null; + } - for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + int excl = -1; + if (exclude != null) { + excl = indexOfStep(exclude); + } - TransHopMeta hi = getTransHop( i ); - if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { - if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) { - if ( count == nr ) { - return hi.getFromStep(); - } - count++; + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + if (i != excl && stepMeta.getName().equalsIgnoreCase(name)) { + return stepMeta; + } } - } + return null; } - return null; - } - - /** - * Get the list of previous steps for a certain reference step. This includes the info steps. - * - * @param stepMeta - * The reference step - * @return The list of the preceding steps, including the info steps. - */ - public List findPreviousSteps( StepMeta stepMeta ) { - return findPreviousSteps( stepMeta, true ); - } - /** - * Get the previous steps on a certain location taking into account the steps being informational or not. - * - * @param stepMeta - * The name of the step - * @param info - * true if we only want the informational steps. - * @return The list of the preceding steps - */ - public List findPreviousSteps( StepMeta stepMeta, boolean info ) { - List previousSteps = new ArrayList(); + /** + * Searches the list of hops for a hop with a certain name. + * + * @param name The name of the hop to look for + * @return The hop information or null if nothing was found. + */ + public TransHopMeta findTransHop(String name) { + int i; - for ( TransHopMeta hi : hops ) { - if ( hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { - // Check if this previous step isn't informative (StreamValueLookup) - // We don't want fields from this stream to show up! - if ( info || !isStepInformative( stepMeta, hi.getFromStep() ) ) { - previousSteps.add( hi.getFromStep() ); + for (i = 0; i < nrTransHops(); i++) { + TransHopMeta hi = getTransHop(i); + if (hi.toString().equalsIgnoreCase(name)) { + return hi; + } } - } + return null; } - return previousSteps; - } - /** - * Get the informational steps for a certain step. An informational step is a step that provides information for - * lookups, etc. 
- * - * @param stepMeta - * The name of the step - * @return An array of the informational steps found - */ - public StepMeta[] getInfoStep( StepMeta stepMeta ) { - String[] infoStepName = stepMeta.getStepMetaInterface().getStepIOMeta().getInfoStepnames(); - if ( infoStepName == null ) { - return null; + /** + * Search all hops for a hop where a certain step is at the start. + * + * @param fromstep The step at the start of the hop. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHopFrom(StepMeta fromstep) { + int i; + for (i = 0; i < nrTransHops(); i++) { + TransHopMeta hi = getTransHop(i); + if (hi.getFromStep() != null && hi.getFromStep().equals(fromstep)) // return the first + { + return hi; + } + } + return null; } - StepMeta[] infoStep = new StepMeta[infoStepName.length]; - for ( int i = 0; i < infoStep.length; i++ ) { - infoStep[i] = findStep( infoStepName[i] ); + /** + * Find a certain hop in the transformation. + * + * @param hi The hop information to look for. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHop(TransHopMeta hi) { + return findTransHop(hi.getFromStep(), hi.getToStep()); } - return infoStep; - } - - /** - * Find the the number of informational steps for a certain step. - * - * @param stepMeta - * The step - * @return The number of informational steps found. - */ - public int findNrInfoSteps( StepMeta stepMeta ) { - if ( stepMeta == null ) { - return 0; + /** + * Search all hops for a hop where a certain step is at the start and another is at the end. + * + * @param from The step at the start of the hop. + * @param to The step at the end of the hop. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHop(StepMeta from, StepMeta to) { + return findTransHop(from, to, false); } - int count = 0; - - for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - - TransHopMeta hi = getTransHop( i ); - if ( hi == null || hi.getToStep() == null ) { - log.logError( BaseMessages.getString( PKG, "TransMeta.Log.DestinationOfHopCannotBeNull" ) ); - } - if ( hi != null && hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { - // Check if this previous step isn't informative (StreamValueLookup) - // We don't want fields from this stream to show up! - if ( isStepInformative( stepMeta, hi.getFromStep() ) ) { - count++; + /** + * Search all hops for a hop where a certain step is at the start and another is at the end. + * + * @param from The step at the start of the hop. + * @param to The step at the end of the hop. + * @param disabledToo the disabled too + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHop(StepMeta from, StepMeta to, boolean disabledToo) { + for (int i = 0; i < nrTransHops(); i++) { + TransHopMeta hi = getTransHop(i); + if (hi.isEnabled() || disabledToo) { + if (hi.getFromStep() != null && hi.getToStep() != null && hi.getFromStep().equals(from) && hi.getToStep() + .equals(to)) { + return hi; + } + } } - } + return null; } - return count; - } - - /** - * Find the informational fields coming from an informational step into the step specified. - * - * @param stepname - * The name of the step - * @return A row containing fields with origin. 
- * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getPrevInfoFields( String stepname ) throws KettleStepException { - return getPrevInfoFields( findStep( stepname ) ); - } - - /** - * Find the informational fields coming from an informational step into the step specified. - * - * @param stepMeta - * The receiving step - * @return A row containing fields with origin. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getPrevInfoFields( StepMeta stepMeta ) throws KettleStepException { - RowMetaInterface row = new RowMeta(); - for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - - TransHopMeta hi = getTransHop( i ); - if ( hi.isEnabled() && hi.getToStep().equals( stepMeta ) ) { - StepMeta infoStep = hi.getFromStep(); - if ( isStepInformative( stepMeta, infoStep ) ) { - row = getPrevStepFields( infoStep ); - getThisStepFields( infoStep, stepMeta, row ); - return row; + /** + * Search all hops for a hop where a certain step is at the end. + * + * @param tostep The step at the end of the hop. + * @return The hop or null if no hop was found. + */ + public TransHopMeta findTransHopTo(StepMeta tostep) { + int i; + for (i = 0; i < nrTransHops(); i++) { + TransHopMeta hi = getTransHop(i); + if (hi.getToStep() != null && hi.getToStep().equals(tostep)) // Return the first! + { + return hi; + } } - } + return null; } - return row; - } - - /** - * Find the number of succeeding steps for a certain originating step. - * - * @param stepMeta - * The originating step - * @return The number of succeeding steps. - * @deprecated just get the next steps as an array - */ - @Deprecated - public int findNrNextSteps( StepMeta stepMeta ) { - int count = 0; - int i; - for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - - TransHopMeta hi = getTransHop( i ); - if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { - count++; - } - } - return count; - } - - /** - * Find the succeeding step at a location for an originating step. - * - * @param stepMeta - * The originating step - * @param nr - * The location - * @return The step found. - * @deprecated just get the next steps as an array - */ - @Deprecated - public StepMeta findNextStep( StepMeta stepMeta, int nr ) { - int count = 0; - int i; - - for ( i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - TransHopMeta hi = getTransHop( i ); - if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { - if ( count == nr ) { - return hi.getToStep(); + /** + * Determines whether or not a certain step is informative. This means that the previous step is sending information + * to this step, but only informative. This means that this step is using the information to process the actual stream + * of data. We use this in StreamLookup, TableInput and other types of steps. + * + * @param this_step The step that is receiving information. + * @param prev_step The step that is sending information + * @return true if prev_step if informative for this_step. + */ + public boolean isStepInformative(StepMeta this_step, StepMeta prev_step) { + String[] infoSteps = this_step.getStepMetaInterface().getStepIOMeta().getInfoStepnames(); + if (infoSteps == null) { + return false; + } + for (int i = 0; i < infoSteps.length; i++) { + if (prev_step.getName().equalsIgnoreCase(infoSteps[i])) { + return true; + } } - count++; - } - } - return null; - } - - /** - * Retrieve an array of preceding steps for a certain destination step. This includes the info steps. 
- * - * @param stepMeta - * The destination step - * @return An array containing the preceding steps. - */ - public StepMeta[] getPrevSteps( StepMeta stepMeta ) { - List prevSteps = new ArrayList(); - for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - TransHopMeta hopMeta = getTransHop( i ); - if ( hopMeta.isEnabled() && hopMeta.getToStep().equals( stepMeta ) ) { - prevSteps.add( hopMeta.getFromStep() ); - } + return false; } - return prevSteps.toArray( new StepMeta[prevSteps.size()] ); - } - - /** - * Retrieve an array of succeeding step names for a certain originating step name. - * - * @param stepname - * The originating step name - * @return An array of succeeding step names - */ - public String[] getPrevStepNames( String stepname ) { - return getPrevStepNames( findStep( stepname ) ); - } - - /** - * Retrieve an array of preceding steps for a certain destination step. - * - * @param stepMeta - * The destination step - * @return an array of preceding step names. - */ - public String[] getPrevStepNames( StepMeta stepMeta ) { - StepMeta[] prevStepMetas = getPrevSteps( stepMeta ); - String[] retval = new String[prevStepMetas.length]; - for ( int x = 0; x < prevStepMetas.length; x++ ) { - retval[x] = prevStepMetas[x].getName(); + /** + * Counts the number of previous steps for a step name. + * + * @param stepname The name of the step to start from + * @return The number of preceding steps. + * @deprecated + */ + @Deprecated + public int findNrPrevSteps(String stepname) { + return findNrPrevSteps(findStep(stepname), false); } - return retval; - } + /** + * Counts the number of previous steps for a step name taking into account whether or not they are informational. + * + * @param stepname The name of the step to start from + * @param info true if only the informational steps are desired, false otherwise + * @return The number of preceding steps. + * @deprecated + */ + @Deprecated + public int findNrPrevSteps(String stepname, boolean info) { + return findNrPrevSteps(findStep(stepname), info); + } - /** - * Retrieve an array of succeeding steps for a certain originating step. - * - * @param stepMeta - * The originating step - * @return an array of succeeding steps. - * @deprecated use findNextSteps instead - */ - @Deprecated - public StepMeta[] getNextSteps( StepMeta stepMeta ) { - List nextSteps = new ArrayList(); - for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; + /** + * Find the number of steps that precede the indicated step. + * + * @param stepMeta The source step + * @return The number of preceding steps found. + */ + public int findNrPrevSteps(StepMeta stepMeta) { + return findNrPrevSteps(stepMeta, false); + } - TransHopMeta hi = getTransHop( i ); - if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { - nextSteps.add( hi.getToStep() ); - } + /** + * Find the previous step on a certain location (i.e. the specified index). + * + * @param stepname The source step name + * @param nr the index into the step list + * @return The preceding step found. + * @deprecated + */ + @Deprecated + public StepMeta findPrevStep(String stepname, int nr) { + return findPrevStep(findStep(stepname), nr); } - return nextSteps.toArray( new StepMeta[nextSteps.size()] ); - } - - /** - * Retrieve a list of succeeding steps for a certain originating step. - * - * @param stepMeta - * The originating step - * @return an array of succeeding steps. 
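The findStep()/findNextSteps()/findPreviousSteps() helpers in this region let callers walk the hop graph by name without indexing the step and hop lists directly. Continuing the hypothetical two-step transformation from the earlier sketch (illustrative only):

    import java.util.List;

    import org.pentaho.di.trans.TransMeta;
    import org.pentaho.di.trans.step.StepMeta;

    public class HopNavigationSketch {
        // transMeta is assumed to be the two-step transformation built in the previous sketch.
        static void walk(TransMeta transMeta) {
            StepMeta generate = transMeta.findStep("Generate"); // case-insensitive lookup by name

            // Downstream: steps reachable over enabled hops starting at "Generate".
            for (StepMeta step : transMeta.findNextSteps(generate)) {
                System.out.println(generate.getName() + " -> " + step.getName());
            }

            // Upstream of "Collect", including informational (lookup) steps.
            StepMeta collect = transMeta.findStep("Collect");
            List<StepMeta> previous = transMeta.findPreviousSteps(collect);
            for (StepMeta step : previous) {
                System.out.println(step.getName() + " feeds " + collect.getName());
            }
        }
    }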
- */ - public List findNextSteps( StepMeta stepMeta ) { - List nextSteps = new ArrayList(); - for ( int i = 0; i < nrTransHops(); i++ ) { // Look at all the hops; - - TransHopMeta hi = getTransHop( i ); - if ( hi.isEnabled() && hi.getFromStep().equals( stepMeta ) ) { - nextSteps.add( hi.getToStep() ); - } + /** + * Find the previous step on a certain location taking into account the steps being informational or not. + * + * @param stepname The name of the step + * @param nr The index into the step list + * @param info true if only the informational steps are desired, false otherwise + * @return The step information + * @deprecated + */ + @Deprecated + public StepMeta findPrevStep(String stepname, int nr, boolean info) { + return findPrevStep(findStep(stepname), nr, info); } - return nextSteps; - } - - /** - * Retrieve an array of succeeding step names for a certain originating step. - * - * @param stepMeta - * The originating step - * @return an array of succeeding step names. - */ - public String[] getNextStepNames( StepMeta stepMeta ) { - StepMeta[] nextStepMeta = getNextSteps( stepMeta ); - String[] retval = new String[nextStepMeta.length]; - for ( int x = 0; x < nextStepMeta.length; x++ ) { - retval[x] = nextStepMeta[x].getName(); + /** + * Find the previous step on a certain location (i.e. the specified index). + * + * @param stepMeta The source step information + * @param nr the index into the hops list + * @return The preceding step found. + */ + public StepMeta findPrevStep(StepMeta stepMeta, int nr) { + return findPrevStep(stepMeta, nr, false); } - return retval; - } - - /** - * Find the step that is located on a certain point on the canvas, taking into account the icon size. - * - * @param x - * the x-coordinate of the point queried - * @param y - * the y-coordinate of the point queried - * @param iconsize - * the iconsize - * @return The step information if a step is located at the point. Otherwise, if no step was found: null. - */ - public StepMeta getStep( int x, int y, int iconsize ) { - int i, s; - s = steps.size(); - for ( i = s - 1; i >= 0; i-- ) // Back to front because drawing goes from start to end - { - StepMeta stepMeta = steps.get( i ); - if ( partOfTransHop( stepMeta ) || stepMeta.isDrawn() ) // Only consider steps from active or inactive hops! - { - Point p = stepMeta.getLocation(); - if ( p != null ) { - if ( x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize + 20 ) { - return stepMeta; - } + /** + * Count the number of previous steps on a certain location taking into account the steps being informational or not. + * + * @param stepMeta The name of the step + * @param info true if only the informational steps are desired, false otherwise + * @return The number of preceding steps + * @deprecated please use method findPreviousSteps + */ + @Deprecated + public int findNrPrevSteps(StepMeta stepMeta, boolean info) { + int count = 0; + int i; + + for (i = 0; i < nrTransHops(); i++) { // Look at all the hops; + TransHopMeta hi = getTransHop(i); + if (hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals(stepMeta)) { + // Check if this previous step isn't informative (StreamValueLookup) + // We don't want fields from this stream to show up! + if (info || !isStepInformative(stepMeta, hi.getFromStep())) { + count++; + } + } } - } + return count; } - return null; - } - /** - * Determines whether or not a certain step is part of a hop. - * - * @param stepMeta - * The step queried - * @return true if the step is part of a hop. 
- */ - public boolean partOfTransHop( StepMeta stepMeta ) { - int i; - for ( i = 0; i < nrTransHops(); i++ ) { - TransHopMeta hi = getTransHop( i ); - if ( hi.getFromStep() == null || hi.getToStep() == null ) { - return false; - } - if ( hi.getFromStep().equals( stepMeta ) || hi.getToStep().equals( stepMeta ) ) { - return true; - } + /** + * Find the previous step on a certain location taking into account the steps being informational or not. + * + * @param stepMeta The step + * @param nr The index into the hops list + * @param info true if we only want the informational steps. + * @return The preceding step information + * @deprecated please use method findPreviousSteps + */ + @Deprecated + public StepMeta findPrevStep(StepMeta stepMeta, int nr, boolean info) { + int count = 0; + int i; + + for (i = 0; i < nrTransHops(); i++) { // Look at all the hops; + + TransHopMeta hi = getTransHop(i); + if (hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals(stepMeta)) { + if (info || !isStepInformative(stepMeta, hi.getFromStep())) { + if (count == nr) { + return hi.getFromStep(); + } + count++; + } + } + } + return null; } - return false; - } - /** - * Returns the fields that are emitted by a certain step name. - * - * @param stepname - * The stepname of the step to be queried. - * @return A row containing the fields emitted. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getStepFields( String stepname ) throws KettleStepException { - StepMeta stepMeta = findStep( stepname ); - if ( stepMeta != null ) { - return getStepFields( stepMeta ); - } else { - return null; + /** + * Get the list of previous steps for a certain reference step. This includes the info steps. + * + * @param stepMeta The reference step + * @return The list of the preceding steps, including the info steps. + */ + public List findPreviousSteps(StepMeta stepMeta) { + return findPreviousSteps(stepMeta, true); } - } - - /** - * Returns the fields that are emitted by a certain step. - * - * @param stepMeta - * The step to be queried. - * @return A row containing the fields emitted. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getStepFields( StepMeta stepMeta ) throws KettleStepException { - return getStepFields( stepMeta, null ); - } - - /** - * Gets the fields for each of the specified steps and merges them into a single set - * - * @param stepMeta - * the step meta - * @return an interface to the step fields - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getStepFields( StepMeta[] stepMeta ) throws KettleStepException { - RowMetaInterface fields = new RowMeta(); - for ( int i = 0; i < stepMeta.length; i++ ) { - RowMetaInterface flds = getStepFields( stepMeta[i] ); - if ( flds != null ) { - fields.mergeRowMeta( flds ); - } + /** + * Get the previous steps on a certain location taking into account the steps being informational or not. + * + * @param stepMeta The name of the step + * @param info true if we only want the informational steps. + * @return The list of the preceding steps + */ + public List findPreviousSteps(StepMeta stepMeta, boolean info) { + List previousSteps = new ArrayList(); + + for (TransHopMeta hi : hops) { + if (hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals(stepMeta)) { + // Check if this previous step isn't informative (StreamValueLookup) + // We don't want fields from this stream to show up! 
+ if (info || !isStepInformative(stepMeta, hi.getFromStep())) { + previousSteps.add(hi.getFromStep()); + } + } + } + return previousSteps; } - return fields; - } - /** - * Returns the fields that are emitted by a certain step. - * - * @param stepMeta - * The step to be queried. - * @param monitor - * The progress monitor for progress dialog. (null if not used!) - * @return A row containing the fields emitted. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getStepFields( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleStepException { - clearStepFieldsCachce(); - setRepositoryOnMappingSteps(); - return getStepFields( stepMeta, null, monitor ); - } - - /** - * Returns the fields that are emitted by a certain step. - * - * @param stepMeta - * The step to be queried. - * @param targetStep - * the target step - * @param monitor - * The progress monitor for progress dialog. (null if not used!) - * @return A row containing the fields emitted. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getStepFields( StepMeta stepMeta, StepMeta targetStep, ProgressMonitorListener monitor ) throws KettleStepException { - RowMetaInterface row = new RowMeta(); + /** + * Get the informational steps for a certain step. An informational step is a step that provides information for + * lookups, etc. + * + * @param stepMeta The step + * @return An array of the informational steps found + */ + public StepMeta[] getInfoStep(StepMeta stepMeta) { + String[] infoStepName = stepMeta.getStepMetaInterface().getStepIOMeta().getInfoStepnames(); + if (infoStepName == null) { + return null; + } - if ( stepMeta == null ) { - return row; - } + StepMeta[] infoStep = new StepMeta[infoStepName.length]; + for (int i = 0; i < infoStep.length; i++) { + infoStep[i] = findStep(infoStepName[i]); + } - String fromToCacheEntry = stepMeta.getName() + ( targetStep != null ? ( "-" + targetStep.getName() ) : "" ); - RowMetaInterface rowMeta = stepsFieldsCache.get( fromToCacheEntry ); - if ( rowMeta != null ) { - return rowMeta; + return infoStep; } - // See if the step is sending ERROR rows to the specified target step. - // - if ( targetStep != null && stepMeta.isSendingErrorRowsToStep( targetStep ) ) { - // The error rows are the same as the input rows for - // the step but with the selected error fields added - // - row = getPrevStepFields( stepMeta ); + /** + * Find the number of informational steps for a certain step. + * + * @param stepMeta The step + * @return The number of informational steps found. + */ + public int findNrInfoSteps(StepMeta stepMeta) { + if (stepMeta == null) { + return 0; + } - // Add to this the error fields... - StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta(); - row.addRowMeta( stepErrorMeta.getErrorFields() ); + int count = 0; - // Store this row in the cache - // - stepsFieldsCache.put( fromToCacheEntry, row ); + for (int i = 0; i < nrTransHops(); i++) { // Look at all the hops; - return row; + TransHopMeta hi = getTransHop(i); + if (hi == null || hi.getToStep() == null) { + log.logError(BaseMessages.getString(PKG, "TransMeta.Log.DestinationOfHopCannotBeNull")); + } + if (hi != null && hi.getToStep() != null && hi.isEnabled() && hi.getToStep().equals(stepMeta)) { + // Check if this previous step isn't informative (StreamValueLookup) + // We don't want fields from this stream to show up!
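For orientation, a minimal caller-side sketch of the hop-navigation helpers touched above (findStep, findPreviousSteps, getInfoStep). It assumes an already-loaded TransMeta named transMeta, a hypothetical step name "Table input", and the stock Kettle package layout; none of this is part of the patch itself.

    import java.util.List;
    import org.pentaho.di.trans.TransMeta;
    import org.pentaho.di.trans.step.StepMeta;

    // Illustrative sketch only: walk the hop graph with the helpers shown above.
    class HopGraphSketch {
        static void printInputs(TransMeta transMeta) {
            StepMeta step = transMeta.findStep("Table input"); // hypothetical step name
            if (step == null) {
                return;
            }
            // Preceding steps, including informational (lookup) streams.
            List<StepMeta> previous = transMeta.findPreviousSteps(step, true);
            for (StepMeta prev : previous) {
                System.out.println(prev.getName() + " -> " + step.getName());
            }
            // Informational steps only, resolved from the step's IO metadata; may be null.
            StepMeta[] infoSteps = transMeta.getInfoStep(step);
            System.out.println("info steps: " + (infoSteps == null ? 0 : infoSteps.length));
        }
    }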
+ if (isStepInformative(stepMeta, hi.getFromStep())) { + count++; + } + } + } + return count; } - // Resume the regular program... - - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), - String.valueOf( findNrPrevSteps( stepMeta ) ) ) ); + /** + * Find the informational fields coming from an informational step into the step specified. + * + * @param stepname The name of the step + * @return A row containing fields with origin. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getPrevInfoFields(String stepname) throws KettleStepException { + return getPrevInfoFields(findStep(stepname)); } - int nrPrevious = findNrPrevSteps( stepMeta ); - for ( int i = 0; i < nrPrevious; i++ ) { - StepMeta prevStepMeta = findPrevStep( stepMeta, i ); - if ( monitor != null ) { - monitor.subTask( - BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName() ) ); - } - - RowMetaInterface add = getStepFields( prevStepMeta, stepMeta, monitor ); - if ( add == null ) { - add = new RowMeta(); - } - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FoundFieldsToAdd" ) + add.toString() ); - } - if ( i == 0 ) { - row.addRowMeta( add ); - } else { - // See if the add fields are not already in the row - for ( int x = 0; x < add.size(); x++ ) { - ValueMetaInterface v = add.getValueMeta( x ); - ValueMetaInterface s = row.searchValueMeta( v.getName() ); - if ( s == null ) { - row.addValueMeta( v ); - } + /** + * Find the informational fields coming from an informational step into the step specified. + * + * @param stepMeta The receiving step + * @return A row containing fields with origin. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getPrevInfoFields(StepMeta stepMeta) throws KettleStepException { + RowMetaInterface row = new RowMeta(); + + for (int i = 0; i < nrTransHops(); i++) { // Look at all the hops; + + TransHopMeta hi = getTransHop(i); + if (hi.isEnabled() && hi.getToStep().equals(stepMeta)) { + StepMeta infoStep = hi.getFromStep(); + if (isStepInformative(stepMeta, infoStep)) { + row = getPrevStepFields(infoStep); + getThisStepFields(infoStep, stepMeta, row); + return row; + } + } } - } + return row; } - if ( nrPrevious == 0 && stepMeta.getRemoteInputSteps().size() > 0 ) { - // Also check the remote input steps (clustering) - // Typically, if there are any, row is still empty at this point - // We'll also be at a starting point in the transformation - // - for ( RemoteStep remoteStep : stepMeta.getRemoteInputSteps() ) { - RowMetaInterface inputFields = remoteStep.getRowMeta(); - for ( ValueMetaInterface inputField : inputFields.getValueMetaList() ) { - if ( row.searchValueMeta( inputField.getName() ) == null ) { - row.addValueMeta( inputField ); - } + /** + * Find the number of succeeding steps for a certain originating step. + * + * @param stepMeta The originating step + * @return The number of succeeding steps. 
+ * @deprecated just get the next steps as an array + */ + @Deprecated + public int findNrNextSteps(StepMeta stepMeta) { + int count = 0; + int i; + for (i = 0; i < nrTransHops(); i++) { // Look at all the hops; + + TransHopMeta hi = getTransHop(i); + if (hi.isEnabled() && hi.getFromStep().equals(stepMeta)) { + count++; + } } - } + return count; } - // Finally, see if we need to add/modify/delete fields with this step "name" - rowMeta = getThisStepFields( stepMeta, targetStep, row, monitor ); - - // Store this row in the cache - // - stepsFieldsCache.put( fromToCacheEntry, rowMeta ); + /** + * Find the succeeding step at a location for an originating step. + * + * @param stepMeta The originating step + * @param nr The location + * @return The step found. + * @deprecated just get the next steps as an array + */ + @Deprecated + public StepMeta findNextStep(StepMeta stepMeta, int nr) { + int count = 0; + int i; - return rowMeta; - } + for (i = 0; i < nrTransHops(); i++) { // Look at all the hops; - /** - * Find the fields that are entering a step with a certain name. - * - * @param stepname - * The name of the step queried - * @return A row containing the fields (w/ origin) entering the step - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getPrevStepFields( String stepname ) throws KettleStepException { - clearStepFieldsCachce(); - return getPrevStepFields( findStep( stepname ) ); - } + TransHopMeta hi = getTransHop(i); + if (hi.isEnabled() && hi.getFromStep().equals(stepMeta)) { + if (count == nr) { + return hi.getToStep(); + } + count++; + } + } + return null; + } - /** - * Find the fields that are entering a certain step. - * - * @param stepMeta - * The step queried - * @return A row containing the fields (w/ origin) entering the step - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getPrevStepFields( StepMeta stepMeta ) throws KettleStepException { - clearStepFieldsCachce(); - return getPrevStepFields( stepMeta, null ); - } + /** + * Retrieve an array of preceding steps for a certain destination step. This includes the info steps. + * + * @param stepMeta The destination step + * @return An array containing the preceding steps. + */ + public StepMeta[] getPrevSteps(StepMeta stepMeta) { + List prevSteps = new ArrayList(); + for (int i = 0; i < nrTransHops(); i++) { // Look at all the hops; - /** - * Find the fields that are entering a certain step. - * - * @param stepMeta - * The step queried - * @param monitor - * The progress monitor for progress dialog. (null if not used!) - * @return A row containing the fields (w/ origin) entering the step - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getPrevStepFields( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleStepException { - clearStepFieldsCachce(); + TransHopMeta hopMeta = getTransHop(i); + if (hopMeta.isEnabled() && hopMeta.getToStep().equals(stepMeta)) { + prevSteps.add(hopMeta.getFromStep()); + } + } - RowMetaInterface row = new RowMeta(); + return prevSteps.toArray(new StepMeta[prevSteps.size()]); + } - if ( stepMeta == null ) { - return null; + /** + * Retrieve an array of succeeding step names for a certain originating step name. 
+ * + * @param stepname The originating step name + * @return An array of succeeding step names + */ + public String[] getPrevStepNames(String stepname) { + return getPrevStepNames(findStep(stepname)); } - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), - String.valueOf( findNrPrevSteps( stepMeta ) ) ) ); + /** + * Retrieve an array of preceding steps for a certain destination step. + * + * @param stepMeta The destination step + * @return an array of preceding step names. + */ + public String[] getPrevStepNames(StepMeta stepMeta) { + StepMeta[] prevStepMetas = getPrevSteps(stepMeta); + String[] retval = new String[prevStepMetas.length]; + for (int x = 0; x < prevStepMetas.length; x++) { + retval[x] = prevStepMetas[x].getName(); + } + + return retval; } - for ( int i = 0; i < findNrPrevSteps( stepMeta ); i++ ) { - StepMeta prevStepMeta = findPrevStep( stepMeta, i ); - if ( monitor != null ) { - monitor.subTask( - BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName() ) ); - } + /** + * Retrieve an array of succeeding steps for a certain originating step. + * + * @param stepMeta The originating step + * @return an array of succeeding steps. + * @deprecated use findNextSteps instead + */ + @Deprecated + public StepMeta[] getNextSteps(StepMeta stepMeta) { + List nextSteps = new ArrayList(); + for (int i = 0; i < nrTransHops(); i++) { // Look at all the hops; + + TransHopMeta hi = getTransHop(i); + if (hi.isEnabled() && hi.getFromStep().equals(stepMeta)) { + nextSteps.add(hi.getToStep()); + } + } - RowMetaInterface add = getStepFields( prevStepMeta, stepMeta, monitor ); + return nextSteps.toArray(new StepMeta[nextSteps.size()]); + } - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.FoundFieldsToAdd2" ) + add.toString() ); - } - if ( i == 0 ) { - // we expect all input streams to be of the same layout! + /** + * Retrieve a list of succeeding steps for a certain originating step. + * + * @param stepMeta The originating step + * @return an array of succeeding steps. + */ + public List findNextSteps(StepMeta stepMeta) { + List nextSteps = new ArrayList(); + for (int i = 0; i < nrTransHops(); i++) { // Look at all the hops; - row.addRowMeta( add ); // recursive! - } else { - // See if the add fields are not already in the row - for ( int x = 0; x < add.size(); x++ ) { - ValueMetaInterface v = add.getValueMeta( x ); - ValueMetaInterface s = row.searchValueMeta( v.getName() ); - if ( s == null ) { - row.addValueMeta( v ); - } + TransHopMeta hi = getTransHop(i); + if (hi.isEnabled() && hi.getFromStep().equals(stepMeta)) { + nextSteps.add(hi.getToStep()); + } } - } + + return nextSteps; } - return row; - } - /** - * Return the fields that are emitted by a step with a certain name. - * - * @param stepname - * The name of the step that's being queried. - * @param row - * A row containing the input fields or an empty row if no input is required. - * @return A Row containing the output fields. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getThisStepFields( String stepname, RowMetaInterface row ) throws KettleStepException { - return getThisStepFields( findStep( stepname ), null, row ); - } + /** + * Retrieve an array of succeeding step names for a certain originating step. + * + * @param stepMeta The originating step + * @return an array of succeeding step names. 
+ */ + public String[] getNextStepNames(StepMeta stepMeta) { + StepMeta[] nextStepMeta = getNextSteps(stepMeta); + String[] retval = new String[nextStepMeta.length]; + for (int x = 0; x < nextStepMeta.length; x++) { + retval[x] = nextStepMeta[x].getName(); + } - /** - * Returns the fields that are emitted by a step. - * - * @param stepMeta - * : The StepMeta object that's being queried - * @param nextStep - * : if non-null this is the next step that's call back to ask what's being sent - * @param row - * : A row containing the input fields or an empty row if no input is required. - * @return A Row containing the output fields. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getThisStepFields( StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row ) throws KettleStepException { - return getThisStepFields( stepMeta, nextStep, row, null ); - } + return retval; + } - /** - * Returns the fields that are emitted by a step. - * - * @param stepMeta - * : The StepMeta object that's being queried - * @param nextStep - * : if non-null this is the next step that's call back to ask what's being sent - * @param row - * : A row containing the input fields or an empty row if no input is required. - * @param monitor - * the monitor - * @return A Row containing the output fields. - * @throws KettleStepException - * the kettle step exception - */ - public RowMetaInterface getThisStepFields( StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row, - ProgressMonitorListener monitor ) throws KettleStepException { - // Then this one. - if ( log.isDebug() ) { - log.logDebug( BaseMessages - .getString( PKG, "TransMeta.Log.GettingFieldsFromStep", stepMeta.getName(), stepMeta.getStepID() ) ); - } - String name = stepMeta.getName(); - - if ( monitor != null ) { - monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingFieldsFromStepTask.Title", name ) ); - } - - StepMetaInterface stepint = stepMeta.getStepMetaInterface(); - RowMetaInterface[] inform = null; - StepMeta[] lu = getInfoStep( stepMeta ); - if ( Const.isEmpty( lu ) ) { - inform = new RowMetaInterface[] { stepint.getTableFields(), }; - } else { - inform = new RowMetaInterface[lu.length]; - for ( int i = 0; i < lu.length; i++ ) { - inform[i] = getStepFields( lu[i] ); - } - } - - setRepositoryOnMappingSteps(); - - // Go get the fields... 
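The name-based accessors above (getPrevStepNames, getNextStepNames) allow inspecting how steps are wired without touching hop objects directly. A short sketch, again assuming a loaded TransMeta and the stock Kettle packages:

    import org.pentaho.di.trans.TransMeta;
    import org.pentaho.di.trans.step.StepMeta;

    // Illustrative sketch only: report fan-in/fan-out per step by name.
    class StepWiringSketch {
        static void dump(TransMeta transMeta) {
            for (int i = 0; i < transMeta.nrSteps(); i++) {
                StepMeta step = transMeta.getStep(i);
                String[] incoming = transMeta.getPrevStepNames(step);
                String[] outgoing = transMeta.getNextStepNames(step);
                System.out.println(step.getName() + ": " + incoming.length + " in, " + outgoing.length + " out");
            }
        }
    }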
- // - RowMetaInterface before = row.clone(); - compatibleGetStepFields( stepint, row, name, inform, nextStep, this ); - if ( !isSomethingDifferentInRow( before, row ) ) { - stepint.getFields( before, name, inform, nextStep, this, repository, metaStore ); - // pass the clone object to prevent from spoiling data by other steps - row = before; - } - - return row; - } - - @SuppressWarnings( "deprecation" ) - private void compatibleGetStepFields( StepMetaInterface stepint, RowMetaInterface row, String name, - RowMetaInterface[] inform, StepMeta nextStep, VariableSpace space ) throws KettleStepException { - - stepint.getFields( row, name, inform, nextStep, space ); - - } - - private boolean isSomethingDifferentInRow( RowMetaInterface before, RowMetaInterface after ) { - if ( before.size() != after.size() ) { - return true; - } - for ( int i = 0; i < before.size(); i++ ) { - ValueMetaInterface beforeValueMeta = before.getValueMeta( i ); - ValueMetaInterface afterValueMeta = after.getValueMeta( i ); - if ( stringsDifferent( beforeValueMeta.getName(), afterValueMeta.getName() ) ) { - return true; - } - if ( beforeValueMeta.getType() != afterValueMeta.getType() ) { - return true; - } - if ( beforeValueMeta.getLength() != afterValueMeta.getLength() ) { - return true; - } - if ( beforeValueMeta.getPrecision() != afterValueMeta.getPrecision() ) { - return true; - } - if ( stringsDifferent( beforeValueMeta.getOrigin(), afterValueMeta.getOrigin() ) ) { - return true; - } - if ( stringsDifferent( beforeValueMeta.getComments(), afterValueMeta.getComments() ) ) { - return true; - } - if ( stringsDifferent( beforeValueMeta.getConversionMask(), afterValueMeta.getConversionMask() ) ) { - return true; - } - if ( stringsDifferent( beforeValueMeta.getStringEncoding(), afterValueMeta.getStringEncoding() ) ) { - return true; - } - if ( stringsDifferent( beforeValueMeta.getDecimalSymbol(), afterValueMeta.getDecimalSymbol() ) ) { - return true; - } - if ( stringsDifferent( beforeValueMeta.getGroupingSymbol(), afterValueMeta.getGroupingSymbol() ) ) { - return true; - } - } - return false; - } - - private boolean stringsDifferent( String one, String two ) { - if ( one == null && two == null ) { - return false; - } - if ( one == null && two != null ) { - return true; - } - if ( one != null && two == null ) { - return true; - } - return !one.equals( two ); - } - - /** - * Set the Repository object on the Mapping step That way the mapping step can determine the output fields for - * repository hosted mappings... This is the exception to the rule so we don't pass this through the getFields() - * method. TODO: figure out a way to make this more generic. 
- */ - private void setRepositoryOnMappingSteps() { - - for ( StepMeta step : steps ) { - if ( step.getStepMetaInterface() instanceof MappingMeta ) { - ( (MappingMeta) step.getStepMetaInterface() ).setRepository( repository ); - ( (MappingMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); - } - if ( step.getStepMetaInterface() instanceof SingleThreaderMeta ) { - ( (SingleThreaderMeta) step.getStepMetaInterface() ).setRepository( repository ); - ( (SingleThreaderMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); - } - if ( step.getStepMetaInterface() instanceof JobExecutorMeta ) { - ( (JobExecutorMeta) step.getStepMetaInterface() ).setRepository( repository ); - ( (JobExecutorMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); - } - if ( step.getStepMetaInterface() instanceof TransExecutorMeta ) { - ( (TransExecutorMeta) step.getStepMetaInterface() ).setRepository( repository ); - ( (TransExecutorMeta) step.getStepMetaInterface() ).setMetaStore( metaStore ); - } - } - } - - /** - * Checks if the transformation is using the specified partition schema. - * - * @param partitionSchema - * the partition schema - * @return true if the transformation is using the partition schema, false otherwise - */ - public boolean isUsingPartitionSchema( PartitionSchema partitionSchema ) { - // Loop over all steps and see if the partition schema is used. - for ( int i = 0; i < nrSteps(); i++ ) { - StepPartitioningMeta stepPartitioningMeta = getStep( i ).getStepPartitioningMeta(); - if ( stepPartitioningMeta != null ) { - PartitionSchema check = stepPartitioningMeta.getPartitionSchema(); - if ( check != null && check.equals( partitionSchema ) ) { - return true; - } - } - } - return false; - } - - /** - * Checks if the transformation is using a cluster schema. - * - * @return true if a cluster schema is used on one or more steps in this transformation, false otherwise - */ - public boolean isUsingAClusterSchema() { - return isUsingClusterSchema( null ); - } + /** + * Find the step that is located on a certain point on the canvas, taking into account the icon size. + * + * @param x the x-coordinate of the point queried + * @param y the y-coordinate of the point queried + * @param iconsize the iconsize + * @return The step information if a step is located at the point. Otherwise, if no step was found: null. + */ + public StepMeta getStep(int x, int y, int iconsize) { + int i, s; + s = steps.size(); + for (i = s - 1; i >= 0; i--) // Back to front because drawing goes from start to end + { + StepMeta stepMeta = steps.get(i); + if (partOfTransHop(stepMeta) || stepMeta.isDrawn()) // Only consider steps from active or inactive hops! + { + Point p = stepMeta.getLocation(); + if (p != null) { + if (x >= p.x && x <= p.x + iconsize && y >= p.y && y <= p.y + iconsize + 20) { + return stepMeta; + } + } + } + } + return null; + } - /** - * Checks if the transformation is using the specified cluster schema. - * - * @param clusterSchema - * the cluster schema to check - * @return true if the specified cluster schema is used on one or more steps in this transformation - */ - public boolean isUsingClusterSchema( ClusterSchema clusterSchema ) { - // Loop over all steps and see if the partition schema is used. 
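getStep(int x, int y, int iconsize), kept above in the compact formatting, does simple canvas hit-testing: it scans steps from back to front and returns the first one whose icon rectangle (plus a 20-pixel label margin below it) contains the point. A hypothetical caller; the icon size and coordinates are made up:

    import org.pentaho.di.trans.TransMeta;
    import org.pentaho.di.trans.step.StepMeta;

    // Illustrative sketch only: which step, if any, sits under a clicked point?
    class CanvasHitTestSketch {
        static String stepUnderClick(TransMeta transMeta, int clickX, int clickY) {
            StepMeta hit = transMeta.getStep(clickX, clickY, 32); // 32px icon size, hypothetical
            return hit == null ? null : hit.getName();
        }
    }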
- for ( int i = 0; i < nrSteps(); i++ ) { - ClusterSchema check = getStep( i ).getClusterSchema(); - if ( check != null && ( clusterSchema == null || check.equals( clusterSchema ) ) ) { - return true; - } - } - return false; - } - - /** - * Checks if the transformation is using the specified slave server. - * - * @param slaveServer - * the slave server - * @return true if the transformation is using the slave server, false otherwise - * @throws KettleException - * if any errors occur while checking for the slave server - */ - public boolean isUsingSlaveServer( SlaveServer slaveServer ) throws KettleException { - // Loop over all steps and see if the slave server is used. - for ( int i = 0; i < nrSteps(); i++ ) { - ClusterSchema clusterSchema = getStep( i ).getClusterSchema(); - if ( clusterSchema != null ) { - for ( SlaveServer check : clusterSchema.getSlaveServers() ) { - if ( check.equals( slaveServer ) ) { - return true; - } + /** + * Determines whether or not a certain step is part of a hop. + * + * @param stepMeta The step queried + * @return true if the step is part of a hop. + */ + public boolean partOfTransHop(StepMeta stepMeta) { + int i; + for (i = 0; i < nrTransHops(); i++) { + TransHopMeta hi = getTransHop(i); + if (hi.getFromStep() == null || hi.getToStep() == null) { + return false; + } + if (hi.getFromStep().equals(stepMeta) || hi.getToStep().equals(stepMeta)) { + return true; + } } - return true; - } + return false; } - return false; - } - /** - * Checks if the transformation is referenced by a repository. - * - * @return true if the transformation is referenced by a repository, false otherwise - */ - public boolean isRepReference() { - return isRepReference( getFilename(), this.getName() ); - } + /** + * Returns the fields that are emitted by a certain step name. + * + * @param stepname The stepname of the step to be queried. + * @return A row containing the fields emitted. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getStepFields(String stepname) throws KettleStepException { + StepMeta stepMeta = findStep(stepname); + if (stepMeta != null) { + return getStepFields(stepMeta); + } else { + return null; + } + } - /** - * Checks if the transformation is referenced by a file. If the transformation is not referenced by a repository, it - * is assumed to be referenced by a file. - * - * @return true if the transformation is referenced by a file, false otherwise - * @see #isRepReference() - */ - public boolean isFileReference() { - return !isRepReference( getFilename(), this.getName() ); - } - - /** - * Checks (using the exact filename and transformation name) if the transformation is referenced by a repository. If - * referenced by a repository, the exact filename should be empty and the exact transformation name should be - * non-empty. - * - * @param exactFilename - * the exact filename - * @param exactTransname - * the exact transformation name - * @return true if the transformation is referenced by a repository, false otherwise - */ - public static boolean isRepReference( String exactFilename, String exactTransname ) { - return Const.isEmpty( exactFilename ) && !Const.isEmpty( exactTransname ); - } - - /** - * Checks (using the exact filename and transformation name) if the transformation is referenced by a file. If - * referenced by a repository, the exact filename should be non-empty and the exact transformation name should be - * empty. 
- * - * @param exactFilename - * the exact filename - * @param exactTransname - * the exact transformation name - * @return true if the transformation is referenced by a file, false otherwise - * @see #isRepReference(String, String) - */ - public static boolean isFileReference( String exactFilename, String exactTransname ) { - return !isRepReference( exactFilename, exactTransname ); - } + /** + * Returns the fields that are emitted by a certain step. + * + * @param stepMeta The step to be queried. + * @return A row containing the fields emitted. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getStepFields(StepMeta stepMeta) throws KettleStepException { + return getStepFields(stepMeta, null); + } - /** - * Finds the location (index) of the specified hop. - * - * @param hi - * The hop queried - * @return The location of the hop, or -1 if nothing was found. - */ - public int indexOfTransHop( TransHopMeta hi ) { - return hops.indexOf( hi ); - } + /** + * Gets the fields for each of the specified steps and merges them into a single set + * + * @param stepMeta the step meta + * @return an interface to the step fields + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getStepFields(StepMeta[] stepMeta) throws KettleStepException { + RowMetaInterface fields = new RowMeta(); - /** - * Finds the location (index) of the specified step. - * - * @param stepMeta - * The step queried - * @return The location of the step, or -1 if nothing was found. - */ - public int indexOfStep( StepMeta stepMeta ) { - return steps.indexOf( stepMeta ); - } + for (int i = 0; i < stepMeta.length; i++) { + RowMetaInterface flds = getStepFields(stepMeta[i]); + if (flds != null) { + fields.mergeRowMeta(flds); + } + } + return fields; + } - /** - * Gets the file type. For TransMeta, this returns a value corresponding to Transformation - * - * @return the file type - * @see org.pentaho.di.core.EngineMetaInterface#getFileType() - */ - public String getFileType() { - return LastUsedFile.FILE_TYPE_TRANSFORMATION; - } + /** + * Returns the fields that are emitted by a certain step. + * + * @param stepMeta The step to be queried. + * @param monitor The progress monitor for progress dialog. (null if not used!) + * @return A row containing the fields emitted. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getStepFields(StepMeta stepMeta, ProgressMonitorListener monitor) throws KettleStepException { + clearStepFieldsCachce(); + setRepositoryOnMappingSteps(); + return getStepFields(stepMeta, null, monitor); + } - /** - * Gets the transformation filter names. - * - * @return the filter names - * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames() - */ - public String[] getFilterNames() { - return Const.getTransformationFilterNames(); - } + /** + * Returns the fields that are emitted by a certain step. + * + * @param stepMeta The step to be queried. + * @param targetStep the target step + * @param monitor The progress monitor for progress dialog. (null if not used!) + * @return A row containing the fields emitted. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getStepFields(StepMeta stepMeta, StepMeta targetStep, ProgressMonitorListener monitor) throws KettleStepException { + RowMetaInterface row = new RowMeta(); - /** - * Gets the transformation filter extensions. 
For TransMeta, this method returns the value of - * {@link Const#STRING_TRANS_FILTER_EXT} - * - * @return the filter extensions - * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions() - */ - public String[] getFilterExtensions() { - return Const.STRING_TRANS_FILTER_EXT; - } + if (stepMeta == null) { + return row; + } - /** - * Gets the default extension for a transformation. For TransMeta, this method returns the value of - * {@link Const#STRING_TRANS_DEFAULT_EXT} - * - * @return the default extension - * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension() - */ - public String getDefaultExtension() { - return Const.STRING_TRANS_DEFAULT_EXT; - } + String fromToCacheEntry = stepMeta.getName() + (targetStep != null ? ("-" + targetStep.getName()) : ""); + RowMetaInterface rowMeta = stepsFieldsCache.get(fromToCacheEntry); + if (rowMeta != null) { + return rowMeta; + } - /** - * Gets the XML representation of this transformation. - * - * @return the XML representation of this transformation - * @throws KettleException - * if any errors occur during generation of the XML - * @see org.pentaho.di.core.xml.XMLInterface#getXML() - */ - public String getXML() throws KettleException { - return getXML( true, true, true, true, true ); - } + // See if the step is sending ERROR rows to the specified target step. + // + if (targetStep != null && stepMeta.isSendingErrorRowsToStep(targetStep)) { + // The error rows are the same as the input rows for + // the step but with the selected error fields added + // + row = getPrevStepFields(stepMeta); - /** - * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster, - * or partition information as specified by the parameters - * - * @param includeSteps - * whether to include step data - * @param includeDatabase - * whether to include database data - * @param includeSlaves - * whether to include slave server data - * @param includeClusters - * whether to include cluster data - * @param includePartitions - * whether to include partition data - * @return the XML representation of this transformation - * @throws KettleException - * if any errors occur during generation of the XML - */ - public String getXML( boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters, - boolean includePartitions ) throws KettleException { - Props props = null; - if ( Props.isInitialized() ) { - props = Props.getInstance(); - } - - StringBuilder retval = new StringBuilder( 800 ); - - retval.append( XMLHandler.openTag( XML_TAG ) ).append( Const.CR ); - - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_INFO ) ).append( Const.CR ); - - retval.append( " " ).append( XMLHandler.addTagValue( "name", name ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "description", description ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "extended_description", extendedDescription ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "trans_version", trans_version ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "trans_type", transformationType.getCode() ) ); - - if ( trans_status >= 0 ) { - retval.append( " " ).append( XMLHandler.addTagValue( "trans_status", trans_status ) ); - } - retval.append( " " ).append( XMLHandler.addTagValue( "directory", - directory != null ? 
directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR ) ); - - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); - String[] parameters = listParameters(); - for ( int idx = 0; idx < parameters.length; idx++ ) { - retval.append( " " ).append( XMLHandler.openTag( "parameter" ) ).append( Const.CR ); - retval.append( " " ).append( XMLHandler.addTagValue( "name", parameters[idx] ) ); - retval.append( " " ) - .append( XMLHandler.addTagValue( "default_value", getParameterDefault( parameters[idx] ) ) ); - retval.append( " " ) - .append( XMLHandler.addTagValue( "description", getParameterDescription( parameters[idx] ) ) ); - retval.append( " " ).append( XMLHandler.closeTag( "parameter" ) ).append( Const.CR ); - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARAMETERS ) ).append( Const.CR ); - - retval.append( " " ).append( Const.CR ); - - // Add the metadata for the various logging tables - // - retval.append( transLogTable.getXML() ); - retval.append( performanceLogTable.getXML() ); - retval.append( channelLogTable.getXML() ); - retval.append( stepLogTable.getXML() ); - retval.append( metricsLogTable.getXML() ); - - retval.append( " " ).append( Const.CR ); - retval.append( " " ).append( Const.CR ); - retval.append( " " ) - .append( XMLHandler.addTagValue( "connection", maxDateConnection == null ? "" : maxDateConnection.getName() ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "table", maxDateTable ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "field", maxDateField ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "offset", maxDateOffset ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "maxdiff", maxDateDifference ) ); - retval.append( " " ).append( Const.CR ); - - retval.append( " " ).append( XMLHandler.addTagValue( "size_rowset", sizeRowset ) ); - - retval.append( " " ).append( XMLHandler.addTagValue( "sleep_time_empty", sleepTimeEmpty ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "sleep_time_full", sleepTimeFull ) ); - - retval.append( " " ).append( XMLHandler.addTagValue( "unique_connections", usingUniqueConnections ) ); - - retval.append( " " ).append( XMLHandler.addTagValue( "feedback_shown", feedbackShown ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "feedback_size", feedbackSize ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "using_thread_priorities", usingThreadPriorityManagment ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "shared_objects_file", sharedObjectsFile ) ); - - // Performance monitoring - // - retval.append( " " ) - .append( XMLHandler.addTagValue( "capture_step_performance", capturingStepPerformanceSnapShots ) ); - retval.append( " " ) - .append( XMLHandler.addTagValue( "step_performance_capturing_delay", stepPerformanceCapturingDelay ) ); - retval.append( " " ) - .append( XMLHandler.addTagValue( "step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit ) ); - - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_DEPENDENCIES ) ).append( Const.CR ); - for ( int i = 0; i < nrDependencies(); i++ ) { - TransDependency td = getDependency( i ); - retval.append( td.getXML() ); - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_DEPENDENCIES ) ).append( Const.CR ); - - // The partitioning schemas... 
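The getStepFields(...) overloads above resolve the row layout a step emits and cache the result per step/target pair in stepsFieldsCache. A minimal caller-side sketch (assumes a loaded TransMeta and the stock Kettle packages; the step name passed in is hypothetical):

    import org.pentaho.di.core.exception.KettleStepException;
    import org.pentaho.di.core.row.RowMetaInterface;
    import org.pentaho.di.core.row.ValueMetaInterface;
    import org.pentaho.di.trans.TransMeta;

    // Illustrative sketch only: inspect the row layout a named step emits.
    class StepFieldsSketch {
        static void printOutputFields(TransMeta transMeta, String stepName) throws KettleStepException {
            RowMetaInterface fields = transMeta.getStepFields(stepName); // null if the step does not exist
            if (fields == null) {
                return;
            }
            for (ValueMetaInterface field : fields.getValueMetaList()) {
                System.out.println(field.getName() + " (origin: " + field.getOrigin() + ")");
            }
        }
    }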
- // - if ( includePartitions ) { - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_PARTITIONSCHEMAS ) ).append( Const.CR ); - for ( int i = 0; i < partitionSchemas.size(); i++ ) { - PartitionSchema partitionSchema = partitionSchemas.get( i ); - retval.append( partitionSchema.getXML() ); - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_PARTITIONSCHEMAS ) ).append( Const.CR ); - } - // The slave servers... - // - if ( includeSlaves ) { - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); - for ( int i = 0; i < slaveServers.size(); i++ ) { - SlaveServer slaveServer = slaveServers.get( i ); - retval.append( " " ).append( slaveServer.getXML() ).append( Const.CR ); - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_SLAVESERVERS ) ).append( Const.CR ); - } - - // The cluster schemas... - // - if ( includeClusters ) { - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_CLUSTERSCHEMAS ) ).append( Const.CR ); - for ( int i = 0; i < clusterSchemas.size(); i++ ) { - ClusterSchema clusterSchema = clusterSchemas.get( i ); - retval.append( clusterSchema.getXML() ); - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_CLUSTERSCHEMAS ) ).append( Const.CR ); - } - - retval.append( " " ).append( XMLHandler.addTagValue( "created_user", createdUser ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "created_date", XMLHandler.date2string( createdDate ) ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "modified_user", modifiedUser ) ); - retval.append( " " ).append( XMLHandler.addTagValue( "modified_date", XMLHandler.date2string( modifiedDate ) ) ); - - try { - retval.append( " " ).append( XMLHandler.addTagValue( "key_for_session_key", keyForSessionKey ) ); - } catch ( Exception ex ) { - log.logError( "Unable to decode key", ex ); - } - retval.append( " " ).append( XMLHandler.addTagValue( "is_key_private", isKeyPrivate ) ); - - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_INFO ) ).append( Const.CR ); - - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_NOTEPADS ) ).append( Const.CR ); - if ( notes != null ) { - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - retval.append( ni.getXML() ); - } - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_NOTEPADS ) ).append( Const.CR ); - - // The database connections... - if ( includeDatabase ) { - for ( int i = 0; i < nrDatabases(); i++ ) { - DatabaseMeta dbMeta = getDatabase( i ); - if ( props != null && props.areOnlyUsedConnectionsSavedToXML() ) { - if ( isDatabaseConnectionUsed( dbMeta ) ) { - retval.append( dbMeta.getXML() ); - } - } else { - retval.append( dbMeta.getXML() ); - } - } - } + // Add to this the error fields... + StepErrorMeta stepErrorMeta = stepMeta.getStepErrorMeta(); + row.addRowMeta(stepErrorMeta.getErrorFields()); - if ( includeSteps ) { - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_ORDER ) ).append( Const.CR ); - for ( int i = 0; i < nrTransHops(); i++ ) { - TransHopMeta transHopMeta = getTransHop( i ); - retval.append( transHopMeta.getXML() ).append( Const.CR ); - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_ORDER ) ).append( Const.CR ); + // Store this row in the cache + // + stepsFieldsCache.put(fromToCacheEntry, row); - /* The steps... 
*/ - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - if ( stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface ) { - ( (HasRepositoryInterface) stepMeta.getStepMetaInterface() ).setRepository( repository ); + return row; } - retval.append( stepMeta.getXML() ); - } - /* The error handling metadata on the steps */ - retval.append( " " ).append( XMLHandler.openTag( XML_TAG_STEP_ERROR_HANDLING ) ).append( Const.CR ); - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); + // Resume the regular program... - if ( stepMeta.getStepErrorMeta() != null ) { - retval.append( stepMeta.getStepErrorMeta().getXML() ); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), + String.valueOf(findNrPrevSteps(stepMeta)))); } - } - retval.append( " " ).append( XMLHandler.closeTag( XML_TAG_STEP_ERROR_HANDLING ) ).append( Const.CR ); - } + int nrPrevious = findNrPrevSteps(stepMeta); + for (int i = 0; i < nrPrevious; i++) { + StepMeta prevStepMeta = findPrevStep(stepMeta, i); - // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. - retval.append( " " ).append( slaveStepCopyPartitionDistribution.getXML() ); + if (monitor != null) { + monitor.subTask( + BaseMessages.getString(PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName())); + } - // Is this a slave transformation or not? - retval.append( " " ).append( XMLHandler.addTagValue( "slave_transformation", slaveTransformation ) ); + RowMetaInterface add = getStepFields(prevStepMeta, stepMeta, monitor); + if (add == null) { + add = new RowMeta(); + } + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.FoundFieldsToAdd") + add.toString()); + } + if (i == 0) { + row.addRowMeta(add); + } else { + // See if the add fields are not already in the row + for (int x = 0; x < add.size(); x++) { + ValueMetaInterface v = add.getValueMeta(x); + ValueMetaInterface s = row.searchValueMeta(v.getName()); + if (s == null) { + row.addValueMeta(v); + } + } + } + } - // Also store the attribute groups - // - retval.append( AttributesUtil.getAttributesXml( attributesMap ) ).append( Const.CR ); + if (nrPrevious == 0 && stepMeta.getRemoteInputSteps().size() > 0) { + // Also check the remote input steps (clustering) + // Typically, if there are any, row is still empty at this point + // We'll also be at a starting point in the transformation + // + for (RemoteStep remoteStep : stepMeta.getRemoteInputSteps()) { + RowMetaInterface inputFields = remoteStep.getRowMeta(); + for (ValueMetaInterface inputField : inputFields.getValueMetaList()) { + if (row.searchValueMeta(inputField.getName()) == null) { + row.addValueMeta(inputField); + } + } + } + } - retval.append( "" ).append( Const.CR ); + // Finally, see if we need to add/modify/delete fields with this step "name" + rowMeta = getThisStepFields(stepMeta, targetStep, row, monitor); - return retval.toString(); - } + // Store this row in the cache + // + stepsFieldsCache.put(fromToCacheEntry, rowMeta); - /** - * Parses a file containing the XML that describes the transformation. No default connections are loaded since no - * repository is available at this time. Since the filename is set, internal variables are being set that relate to - * this. 
- * - * @param fname - * The filename - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname ) throws KettleXMLException, KettleMissingPluginsException { - this( fname, true ); - } - - /** - * Parses a file containing the XML that describes the transformation. No default connections are loaded since no - * repository is available at this time. Since the filename is set, variables are set in the specified variable space - * that relate to this. - * - * @param fname - * The filename - * @param parentVariableSpace - * the parent variable space - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, VariableSpace parentVariableSpace ) throws KettleXMLException, - KettleMissingPluginsException { - this( fname, null, true, parentVariableSpace ); - } - - /** - * Parses a file containing the XML that describes the transformation. No default connections are loaded since no - * repository is available at this time. - * - * @param fname - * The filename - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, boolean setInternalVariables ) throws KettleXMLException, - KettleMissingPluginsException { - this( fname, null, setInternalVariables ); - } + return rowMeta; + } - /** - * Parses a file containing the XML that describes the transformation. - * - * @param fname - * The filename - * @param rep - * The repository to load the default set of connections from, null if no repository is available - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, Repository rep ) throws KettleXMLException, KettleMissingPluginsException { - this( fname, rep, true ); - } + /** + * Find the fields that are entering a step with a certain name. + * + * @param stepname The name of the step queried + * @return A row containing the fields (w/ origin) entering the step + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getPrevStepFields(String stepname) throws KettleStepException { + clearStepFieldsCachce(); + return getPrevStepFields(findStep(stepname)); + } - /** - * Parses a file containing the XML that describes the transformation. 
- * - * @param fname - * The filename - * @param rep - * The repository to load the default set of connections from, null if no repository is available - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, Repository rep, boolean setInternalVariables ) throws KettleXMLException, - KettleMissingPluginsException { - this( fname, rep, setInternalVariables, null ); - } + /** + * Find the fields that are entering a certain step. + * + * @param stepMeta The step queried + * @return A row containing the fields (w/ origin) entering the step + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getPrevStepFields(StepMeta stepMeta) throws KettleStepException { + clearStepFieldsCachce(); + return getPrevStepFields(stepMeta, null); + } - /** - * Parses a file containing the XML that describes the transformation. - * - * @param fname - * The filename - * @param rep - * The repository to load the default set of connections from, null if no repository is available - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace ) throws KettleXMLException, KettleMissingPluginsException { - this( fname, rep, setInternalVariables, parentVariableSpace, null ); - } + /** + * Find the fields that are entering a certain step. + * + * @param stepMeta The step queried + * @param monitor The progress monitor for progress dialog. (null if not used!) + * @return A row containing the fields (w/ origin) entering the step + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getPrevStepFields(StepMeta stepMeta, ProgressMonitorListener monitor) throws KettleStepException { + clearStepFieldsCachce(); - /** - * Parses a file containing the XML that describes the transformation. 
- * - * @param fname - * The filename - * @param rep - * The repository to load the default set of connections from, null if no repository is available - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @param prompter - * the changed/replace listener or null if there is none - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, - OverwritePrompter prompter ) throws KettleXMLException, KettleMissingPluginsException { - this( fname, null, rep, setInternalVariables, parentVariableSpace, prompter ); - } + RowMetaInterface row = new RowMeta(); - /** - * Parses a file containing the XML that describes the transformation. - * - * @param fname - * The filename - * @param metaStore - * the metadata store to reference (or null if there is none) - * @param rep - * The repository to load the default set of connections from, null if no repository is available - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @param prompter - * the changed/replace listener or null if there is none - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, - VariableSpace parentVariableSpace, OverwritePrompter prompter ) - throws KettleXMLException, KettleMissingPluginsException { - this.metaStore = metaStore; - this.repository = rep; + if (stepMeta == null) { + return null; + } - // OK, try to load using the VFS stuff... - Document doc = null; - try { - doc = XMLHandler.loadXMLFile( KettleVFS.getFileObject( fname, parentVariableSpace ) ); - } catch ( KettleFileException e ) { - throw new KettleXMLException( BaseMessages.getString( - PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname ), e ); - } + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.FromStepALookingAtPreviousStep", stepMeta.getName(), + String.valueOf(findNrPrevSteps(stepMeta)))); + } + for (int i = 0; i < findNrPrevSteps(stepMeta); i++) { + StepMeta prevStepMeta = findPrevStep(stepMeta, i); - if ( doc != null ) { - // Root node: - Node transnode = XMLHandler.getSubNode( doc, XML_TAG ); + if (monitor != null) { + monitor.subTask( + BaseMessages.getString(PKG, "TransMeta.Monitor.CheckingStepTask.Title", prevStepMeta.getName())); + } - if ( transnode == null ) { - throw new KettleXMLException( BaseMessages.getString( - PKG, "TransMeta.Exception.NotValidTransformationXML", fname ) ); - } + RowMetaInterface add = getStepFields(prevStepMeta, stepMeta, monitor); - // Load from this node... 
- loadXML( transnode, fname, metaStore, rep, setInternalVariables, parentVariableSpace, prompter ); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.FoundFieldsToAdd2") + add.toString()); + } + if (i == 0) { + // we expect all input streams to be of the same layout! - } else { - throw new KettleXMLException( BaseMessages.getString( - PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname ) ); + row.addRowMeta(add); // recursive! + } else { + // See if the add fields are not already in the row + for (int x = 0; x < add.size(); x++) { + ValueMetaInterface v = add.getValueMeta(x); + ValueMetaInterface s = row.searchValueMeta(v.getName()); + if (s == null) { + row.addValueMeta(v); + } + } + } + } + return row; } - } - /** - * Instantiates a new transformation meta-data object. - * - * @param xmlStream - * the XML input stream from which to read the transformation definition - * @param rep - * the repository - * @param setInternalVariables - * whether to set internal variables as a result of the creation - * @param parentVariableSpace - * the parent variable space - * @param prompter - * a GUI component that will prompt the user if the new transformation will overwrite an existing one - * @throws KettleXMLException - * if any errors occur during parsing of the specified stream - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( InputStream xmlStream, Repository rep, boolean setInternalVariables, - VariableSpace parentVariableSpace, OverwritePrompter prompter ) - throws KettleXMLException, KettleMissingPluginsException { - Document doc = XMLHandler.loadXMLFile( xmlStream, null, false, false ); - Node transnode = XMLHandler.getSubNode( doc, XML_TAG ); - loadXML( transnode, rep, setInternalVariables, parentVariableSpace, prompter ); - } - - /** - * Parse a file containing the XML that describes the transformation. Specify a repository to load default list of - * database connections from and to reference in mappings etc. - * - * @param transnode - * The XML node to load from - * @param rep - * the repository to reference. - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public TransMeta( Node transnode, Repository rep ) throws KettleXMLException, KettleMissingPluginsException { - loadXML( transnode, rep, false ); - } + /** + * Return the fields that are emitted by a step with a certain name. + * + * @param stepname The name of the step that's being queried. + * @param row A row containing the input fields or an empty row if no input is required. + * @return A Row containing the output fields. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getThisStepFields(String stepname, RowMetaInterface row) throws KettleStepException { + return getThisStepFields(findStep(stepname), null, row); + } - /** - * Parses an XML DOM (starting at the specified Node) that describes the transformation. 
- * - * @param transnode - * The XML node to load from - * @param rep - * The repository to load the default list of database connections from (null if no repository is available) - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public void loadXML( Node transnode, Repository rep, boolean setInternalVariables ) throws KettleXMLException, - KettleMissingPluginsException { - loadXML( transnode, rep, setInternalVariables, null ); - } + /** + * Returns the fields that are emitted by a step. + * + * @param stepMeta : The StepMeta object that's being queried + * @param nextStep : if non-null this is the next step that's call back to ask what's being sent + * @param row : A row containing the input fields or an empty row if no input is required. + * @return A Row containing the output fields. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getThisStepFields(StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row) throws KettleStepException { + return getThisStepFields(stepMeta, nextStep, row, null); + } - /** - * Parses an XML DOM (starting at the specified Node) that describes the transformation. - * - * @param transnode - * The XML node to load from - * @param rep - * The repository to load the default list of database connections from (null if no repository is available) - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public void loadXML( Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace ) - throws KettleXMLException, KettleMissingPluginsException { - loadXML( transnode, rep, setInternalVariables, parentVariableSpace, null ); - } + /** + * Returns the fields that are emitted by a step. + * + * @param stepMeta : The StepMeta object that's being queried + * @param nextStep : if non-null this is the next step that's call back to ask what's being sent + * @param row : A row containing the input fields or an empty row if no input is required. + * @param monitor the monitor + * @return A Row containing the output fields. + * @throws KettleStepException the kettle step exception + */ + public RowMetaInterface getThisStepFields(StepMeta stepMeta, StepMeta nextStep, RowMetaInterface row, + ProgressMonitorListener monitor) throws KettleStepException { + // Then this one. + if (log.isDebug()) { + log.logDebug(BaseMessages + .getString(PKG, "TransMeta.Log.GettingFieldsFromStep", stepMeta.getName(), stepMeta.getStepID())); + } + String name = stepMeta.getName(); - /** - * Parses an XML DOM (starting at the specified Node) that describes the transformation. 
- * - * @param transnode - * The XML node to load from - * @param rep - * The repository to load the default list of database connections from (null if no repository is available) - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @param prompter - * the changed/replace listener or null if there is none - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public void loadXML( Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, - OverwritePrompter prompter ) throws KettleXMLException, KettleMissingPluginsException { - loadXML( transnode, null, rep, setInternalVariables, parentVariableSpace, prompter ); - } + if (monitor != null) { + monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.GettingFieldsFromStepTask.Title", name)); + } - /** - * Parses an XML DOM (starting at the specified Node) that describes the transformation. - * - * @param transnode - * The XML node to load from - * @param fname - * The filename - * @param rep - * The repository to load the default list of database connections from (null if no repository is available) - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @param prompter - * the changed/replace listener or null if there is none - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public void loadXML( Node transnode, String fname, Repository rep, boolean setInternalVariables, - VariableSpace parentVariableSpace, OverwritePrompter prompter ) - throws KettleXMLException, KettleMissingPluginsException { - loadXML( transnode, fname, null, rep, setInternalVariables, parentVariableSpace, prompter ); - } - - /** - * Parses an XML DOM (starting at the specified Node) that describes the transformation. 
- * - * @param transnode - * The XML node to load from - * @param fname - * The filename - * @param rep - * The repository to load the default list of database connections from (null if no repository is available) - * @param setInternalVariables - * true if you want to set the internal variables based on this transformation information - * @param parentVariableSpace - * the parent variable space to use during TransMeta construction - * @param prompter - * the changed/replace listener or null if there is none - * @throws KettleXMLException - * if any errors occur during parsing of the specified file - * @throws KettleMissingPluginsException - * in case missing plugins were found (details are in the exception in that case) - */ - public void loadXML( Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, - VariableSpace parentVariableSpace, OverwritePrompter prompter ) - throws KettleXMLException, KettleMissingPluginsException { + StepMetaInterface stepint = stepMeta.getStepMetaInterface(); + RowMetaInterface[] inform = null; + StepMeta[] lu = getInfoStep(stepMeta); + if (Const.isEmpty(lu)) { + inform = new RowMetaInterface[]{stepint.getTableFields(),}; + } else { + inform = new RowMetaInterface[lu.length]; + for (int i = 0; i < lu.length; i++) { + inform[i] = getStepFields(lu[i]); + } + } - KettleMissingPluginsException - missingPluginsException = - new KettleMissingPluginsException( - BaseMessages.getString( PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception" ) ); + setRepositoryOnMappingSteps(); - this.metaStore = metaStore; // Remember this as the primary meta store. + // Go get the fields... + // + RowMetaInterface before = row.clone(); + compatibleGetStepFields(stepint, row, name, inform, nextStep, this); + if (!isSomethingDifferentInRow(before, row)) { + stepint.getFields(before, name, inform, nextStep, this, repository, metaStore); + // pass the clone object to prevent from spoiling data by other steps + row = before; + } - try { + return row; + } - Props props = null; - if ( Props.isInitialized() ) { - props = Props.getInstance(); - } + @SuppressWarnings("deprecation") + private void compatibleGetStepFields(StepMetaInterface stepint, RowMetaInterface row, String name, + RowMetaInterface[] inform, StepMeta nextStep, VariableSpace space) throws KettleStepException { - initializeVariablesFrom( parentVariableSpace ); + stepint.getFields(row, name, inform, nextStep, space); - try { - // Clear the transformation - clear(); + } - // If we are not using a repository, we are getting the transformation from a file - // Set the filename here so it can be used in variables for ALL aspects of the transformation FIX: PDI-8890 - if ( null == rep ) { - setFilename( fname ); + private boolean isSomethingDifferentInRow(RowMetaInterface before, RowMetaInterface after) { + if (before.size() != after.size()) { + return true; } - - // Read all the database connections from the repository to make sure that we don't overwrite any there by - // loading from XML. - // - try { - sharedObjectsFile = XMLHandler.getTagValue( transnode, "info", "shared_objects_file" ); - sharedObjects = rep != null ? 
rep.readTransSharedObjects( this ) : readSharedObjects(); - } catch ( Exception e ) { - log - .logError( BaseMessages.getString( PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString() ) ); - log.logError( Const.getStackTracker( e ) ); + for (int i = 0; i < before.size(); i++) { + ValueMetaInterface beforeValueMeta = before.getValueMeta(i); + ValueMetaInterface afterValueMeta = after.getValueMeta(i); + if (stringsDifferent(beforeValueMeta.getName(), afterValueMeta.getName())) { + return true; + } + if (beforeValueMeta.getType() != afterValueMeta.getType()) { + return true; + } + if (beforeValueMeta.getLength() != afterValueMeta.getLength()) { + return true; + } + if (beforeValueMeta.getPrecision() != afterValueMeta.getPrecision()) { + return true; + } + if (stringsDifferent(beforeValueMeta.getOrigin(), afterValueMeta.getOrigin())) { + return true; + } + if (stringsDifferent(beforeValueMeta.getComments(), afterValueMeta.getComments())) { + return true; + } + if (stringsDifferent(beforeValueMeta.getConversionMask(), afterValueMeta.getConversionMask())) { + return true; + } + if (stringsDifferent(beforeValueMeta.getStringEncoding(), afterValueMeta.getStringEncoding())) { + return true; + } + if (stringsDifferent(beforeValueMeta.getDecimalSymbol(), afterValueMeta.getDecimalSymbol())) { + return true; + } + if (stringsDifferent(beforeValueMeta.getGroupingSymbol(), afterValueMeta.getGroupingSymbol())) { + return true; + } } + return false; + } - // Load the database connections, slave servers, cluster schemas & partition schemas into this object. - // - importFromMetaStore(); - - // Handle connections - int n = XMLHandler.countNodes( transnode, DatabaseMeta.XML_TAG ); - Set privateTransformationDatabases = new HashSet( n ); - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.WeHaveConnections", String.valueOf( n ) ) ); - } - for ( int i = 0; i < n; i++ ) { - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtConnection" ) + i ); - } - Node nodecon = XMLHandler.getSubNodeByNr( transnode, DatabaseMeta.XML_TAG, i ); - - DatabaseMeta dbcon = new DatabaseMeta( nodecon ); - dbcon.shareVariablesWith( this ); - if ( !dbcon.isShared() ) { - privateTransformationDatabases.add( dbcon.getName() ); - } - - DatabaseMeta exist = findDatabase( dbcon.getName() ); - if ( exist == null ) { - addDatabase( dbcon ); - } else { - if ( !exist.isShared() ) // otherwise, we just keep the shared connection. - { - if ( shouldOverwrite( prompter, props, BaseMessages.getString( PKG, - "TransMeta.Message.OverwriteConnectionYN", dbcon.getName() ), BaseMessages.getString( PKG, - "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { - int idx = indexOfDatabase( exist ); - removeDatabase( idx ); - addDatabase( idx, dbcon ); - } - } - } - } - setPrivateDatabases( privateTransformationDatabases ); - - // Read the notes... - Node notepadsnode = XMLHandler.getSubNode( transnode, XML_TAG_NOTEPADS ); - int nrnotes = XMLHandler.countNodes( notepadsnode, NotePadMeta.XML_TAG ); - for ( int i = 0; i < nrnotes; i++ ) { - Node notepadnode = XMLHandler.getSubNodeByNr( notepadsnode, NotePadMeta.XML_TAG, i ); - NotePadMeta ni = new NotePadMeta( notepadnode ); - notes.add( ni ); - } - - // Handle Steps - int s = XMLHandler.countNodes( transnode, StepMeta.XML_TAG ); - - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.ReadingSteps" ) + s + " steps..." 
); - } - for ( int i = 0; i < s; i++ ) { - Node stepnode = XMLHandler.getSubNodeByNr( transnode, StepMeta.XML_TAG, i ); - - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtStep" ) + i ); - } - - StepMeta stepMeta = new StepMeta( stepnode, databases, metaStore ); - stepMeta.setParentTransMeta( this ); // for tracing, retain hierarchy - - if ( stepMeta.isMissing() ) { - addMissingTrans( (MissingTrans) stepMeta.getStepMetaInterface() ); - } - // Check if the step exists and if it's a shared step. - // If so, then we will keep the shared version, not this one. - // The stored XML is only for backup purposes. - // - StepMeta check = findStep( stepMeta.getName() ); - if ( check != null ) { - if ( !check.isShared() ) { - // Don't overwrite shared objects - - addOrReplaceStep( stepMeta ); - } else { - check.setDraw( stepMeta.isDrawn() ); // Just keep the drawn flag and location - check.setLocation( stepMeta.getLocation() ); - } - } else { - addStep( stepMeta ); // simply add it. - } + private boolean stringsDifferent(String one, String two) { + if (one == null && two == null) { + return false; + } + if (one == null && two != null) { + return true; + } + if (one != null && two == null) { + return true; } + return !one.equals(two); + } - // Read the error handling code of the steps... - // - Node errorHandlingNode = XMLHandler.getSubNode( transnode, XML_TAG_STEP_ERROR_HANDLING ); - int nrErrorHandlers = XMLHandler.countNodes( errorHandlingNode, StepErrorMeta.XML_TAG ); - for ( int i = 0; i < nrErrorHandlers; i++ ) { - Node stepErrorMetaNode = XMLHandler.getSubNodeByNr( errorHandlingNode, StepErrorMeta.XML_TAG, i ); - StepErrorMeta stepErrorMeta = new StepErrorMeta( this, stepErrorMetaNode, steps ); - if ( stepErrorMeta.getSourceStep() != null ) { - stepErrorMeta.getSourceStep().setStepErrorMeta( stepErrorMeta ); // a bit of a trick, I know. - } + /** + * Set the Repository object on the Mapping step That way the mapping step can determine the output fields for + * repository hosted mappings... This is the exception to the rule so we don't pass this through the getFields() + * method. TODO: figure out a way to make this more generic. + */ + private void setRepositoryOnMappingSteps() { + + for (StepMeta step : steps) { + if (step.getStepMetaInterface() instanceof MappingMeta) { + ((MappingMeta) step.getStepMetaInterface()).setRepository(repository); + ((MappingMeta) step.getStepMetaInterface()).setMetaStore(metaStore); + } + if (step.getStepMetaInterface() instanceof SingleThreaderMeta) { + ((SingleThreaderMeta) step.getStepMetaInterface()).setRepository(repository); + ((SingleThreaderMeta) step.getStepMetaInterface()).setMetaStore(metaStore); + } + if (step.getStepMetaInterface() instanceof JobExecutorMeta) { + ((JobExecutorMeta) step.getStepMetaInterface()).setRepository(repository); + ((JobExecutorMeta) step.getStepMetaInterface()).setMetaStore(metaStore); + } + if (step.getStepMetaInterface() instanceof TransExecutorMeta) { + ((TransExecutorMeta) step.getStepMetaInterface()).setRepository(repository); + ((TransExecutorMeta) step.getStepMetaInterface()).setMetaStore(metaStore); + } } + } - // Have all StreamValueLookups, etc. reference the correct source steps... - // - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - StepMetaInterface sii = stepMeta.getStepMetaInterface(); - if ( sii != null ) { - sii.searchInfoAndTargetSteps( steps ); - } + /** + * Checks if the transformation is using the specified partition schema. 
+ * + * @param partitionSchema the partition schema + * @return true if the transformation is using the partition schema, false otherwise + */ + public boolean isUsingPartitionSchema(PartitionSchema partitionSchema) { + // Loop over all steps and see if the partition schema is used. + for (int i = 0; i < nrSteps(); i++) { + StepPartitioningMeta stepPartitioningMeta = getStep(i).getStepPartitioningMeta(); + if (stepPartitioningMeta != null) { + PartitionSchema check = stepPartitioningMeta.getPartitionSchema(); + if (check != null && check.equals(partitionSchema)) { + return true; + } + } } + return false; + } - // Handle Hops - // - Node ordernode = XMLHandler.getSubNode( transnode, XML_TAG_ORDER ); - n = XMLHandler.countNodes( ordernode, TransHopMeta.XML_TAG ); + /** + * Checks if the transformation is using a cluster schema. + * + * @return true if a cluster schema is used on one or more steps in this transformation, false otherwise + */ + public boolean isUsingAClusterSchema() { + return isUsingClusterSchema(null); + } - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.WeHaveHops" ) + n + " hops..." ); + /** + * Checks if the transformation is using the specified cluster schema. + * + * @param clusterSchema the cluster schema to check + * @return true if the specified cluster schema is used on one or more steps in this transformation + */ + public boolean isUsingClusterSchema(ClusterSchema clusterSchema) { + // Loop over all steps and see if the partition schema is used. + for (int i = 0; i < nrSteps(); i++) { + ClusterSchema check = getStep(i).getClusterSchema(); + if (check != null && (clusterSchema == null || check.equals(clusterSchema))) { + return true; + } } - for ( int i = 0; i < n; i++ ) { - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.LookingAtHop" ) + i ); - } - Node hopnode = XMLHandler.getSubNodeByNr( ordernode, TransHopMeta.XML_TAG, i ); + return false; + } - TransHopMeta hopinf = new TransHopMeta( hopnode, steps ); - addTransHop( hopinf ); + /** + * Checks if the transformation is using the specified slave server. + * + * @param slaveServer the slave server + * @return true if the transformation is using the slave server, false otherwise + * @throws KettleException if any errors occur while checking for the slave server + */ + public boolean isUsingSlaveServer(SlaveServer slaveServer) throws KettleException { + // Loop over all steps and see if the slave server is used. + for (int i = 0; i < nrSteps(); i++) { + ClusterSchema clusterSchema = getStep(i).getClusterSchema(); + if (clusterSchema != null) { + for (SlaveServer check : clusterSchema.getSlaveServers()) { + if (check.equals(slaveServer)) { + return true; + } + } + return true; + } } + return false; + } - // - // get transformation info: - // - Node infonode = XMLHandler.getSubNode( transnode, XML_TAG_INFO ); + /** + * Checks if the transformation is referenced by a repository. + * + * @return true if the transformation is referenced by a repository, false otherwise + */ + public boolean isRepReference() { + return isRepReference(getFilename(), this.getName()); + } - // Name - // - setName( XMLHandler.getTagValue( infonode, "name" ) ); + /** + * Checks if the transformation is referenced by a file. If the transformation is not referenced by a repository, it + * is assumed to be referenced by a file. 
+ * + * @return true if the transformation is referenced by a file, false otherwise + * @see #isRepReference() + */ + public boolean isFileReference() { + return !isRepReference(getFilename(), this.getName()); + } - // description - // - description = XMLHandler.getTagValue( infonode, "description" ); + /** + * Checks (using the exact filename and transformation name) if the transformation is referenced by a repository. If + * referenced by a repository, the exact filename should be empty and the exact transformation name should be + * non-empty. + * + * @param exactFilename the exact filename + * @param exactTransname the exact transformation name + * @return true if the transformation is referenced by a repository, false otherwise + */ + public static boolean isRepReference(String exactFilename, String exactTransname) { + return Const.isEmpty(exactFilename) && !Const.isEmpty(exactTransname); + } - // extended description - // - extendedDescription = XMLHandler.getTagValue( infonode, "extended_description" ); + /** + * Checks (using the exact filename and transformation name) if the transformation is referenced by a file. If + * referenced by a repository, the exact filename should be non-empty and the exact transformation name should be + * empty. + * + * @param exactFilename the exact filename + * @param exactTransname the exact transformation name + * @return true if the transformation is referenced by a file, false otherwise + * @see #isRepReference(String, String) + */ + public static boolean isFileReference(String exactFilename, String exactTransname) { + return !isRepReference(exactFilename, exactTransname); + } - // trans version - // - trans_version = XMLHandler.getTagValue( infonode, "trans_version" ); + /** + * Finds the location (index) of the specified hop. + * + * @param hi The hop queried + * @return The location of the hop, or -1 if nothing was found. + */ + public int indexOfTransHop(TransHopMeta hi) { + return hops.indexOf(hi); + } - // trans status - // - trans_status = Const.toInt( XMLHandler.getTagValue( infonode, "trans_status" ), -1 ); + /** + * Finds the location (index) of the specified step. + * + * @param stepMeta The step queried + * @return The location of the step, or -1 if nothing was found. + */ + public int indexOfStep(StepMeta stepMeta) { + return steps.indexOf(stepMeta); + } - String transTypeCode = XMLHandler.getTagValue( infonode, "trans_type" ); - transformationType = TransformationType.getTransformationTypeByCode( transTypeCode ); + /** + * Gets the file type. For TransMeta, this returns a value corresponding to Transformation + * + * @return the file type + * @see org.pentaho.di.core.EngineMetaInterface#getFileType() + */ + public String getFileType() { + return LastUsedFile.FILE_TYPE_TRANSFORMATION; + } - // Optionally load the repository directory... - // - if ( rep != null ) { - String directoryPath = XMLHandler.getTagValue( infonode, "directory" ); - if ( directoryPath != null ) { - directory = rep.findDirectory( directoryPath ); - if ( directory == null ) { // not found - directory = new RepositoryDirectory(); // The root as default - } - } - } + /** + * Gets the transformation filter names. + * + * @return the filter names + * @see org.pentaho.di.core.EngineMetaInterface#getFilterNames() + */ + public String[] getFilterNames() { + return Const.getTransformationFilterNames(); + } - // Read logging table information - // - Node logNode = XMLHandler.getSubNode( infonode, "log" ); - if ( logNode != null ) { - - // Backward compatibility... 
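[Editor's illustration, not part of the patch] A minimal sketch of how the two static reference checks above classify a transformation; the transformation name and null filename below are made-up values, not anything defined in this change:

        // Hypothetical inputs, for illustration only.
        String exactFilename = null;                 // nothing on disk
        String exactTransname = "load_sales_fact";   // a non-empty transformation name

        // An empty filename combined with a non-empty name is treated as a
        // repository reference; isFileReference is simply the negation.
        boolean repBased  = TransMeta.isRepReference(exactFilename, exactTransname);   // true
        boolean fileBased = TransMeta.isFileReference(exactFilename, exactTransname);  // false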
- // - Node transLogNode = XMLHandler.getSubNode( logNode, TransLogTable.XML_TAG ); - if ( transLogNode == null ) { - // Load the XML - // - transLogTable.findField( TransLogTable.ID.LINES_READ ) - .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "read" ) ) ); - transLogTable.findField( TransLogTable.ID.LINES_WRITTEN ) - .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "write" ) ) ); - transLogTable.findField( TransLogTable.ID.LINES_INPUT ) - .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "input" ) ) ); - transLogTable.findField( TransLogTable.ID.LINES_OUTPUT ) - .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "output" ) ) ); - transLogTable.findField( TransLogTable.ID.LINES_UPDATED ) - .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "update" ) ) ); - transLogTable.findField( TransLogTable.ID.LINES_REJECTED ) - .setSubject( findStep( XMLHandler.getTagValue( infonode, "log", "rejected" ) ) ); - - transLogTable.setConnectionName( XMLHandler.getTagValue( infonode, "log", "connection" ) ); - transLogTable.setSchemaName( XMLHandler.getTagValue( infonode, "log", "schema" ) ); - transLogTable.setTableName( XMLHandler.getTagValue( infonode, "log", "table" ) ); - transLogTable.findField( TransLogTable.ID.ID_BATCH ) - .setEnabled( "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "log", "use_batchid" ) ) ); - transLogTable.findField( TransLogTable.ID.LOG_FIELD ) - .setEnabled( "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "log", "USE_LOGFIELD" ) ) ); - transLogTable.setLogSizeLimit( XMLHandler.getTagValue( infonode, "log", "size_limit_lines" ) ); - transLogTable.setLogInterval( XMLHandler.getTagValue( infonode, "log", "interval" ) ); - transLogTable.findField( TransLogTable.ID.CHANNEL_ID ).setEnabled( false ); - transLogTable.findField( TransLogTable.ID.LINES_REJECTED ).setEnabled( false ); - performanceLogTable.setConnectionName( transLogTable.getConnectionName() ); - performanceLogTable.setTableName( XMLHandler.getTagValue( infonode, "log", "step_performance_table" ) ); - } else { - transLogTable.loadXML( transLogNode, databases, steps ); - } - Node perfLogNode = XMLHandler.getSubNode( logNode, PerformanceLogTable.XML_TAG ); - if ( perfLogNode != null ) { - performanceLogTable.loadXML( perfLogNode, databases, steps ); - } - Node channelLogNode = XMLHandler.getSubNode( logNode, ChannelLogTable.XML_TAG ); - if ( channelLogNode != null ) { - channelLogTable.loadXML( channelLogNode, databases, steps ); - } - Node stepLogNode = XMLHandler.getSubNode( logNode, StepLogTable.XML_TAG ); - if ( stepLogNode != null ) { - stepLogTable.loadXML( stepLogNode, databases, steps ); - } - Node metricsLogNode = XMLHandler.getSubNode( logNode, MetricsLogTable.XML_TAG ); - if ( metricsLogNode != null ) { - metricsLogTable.loadXML( metricsLogNode, databases, steps ); - } - } - - // Maxdate range options... - String maxdatcon = XMLHandler.getTagValue( infonode, "maxdate", "connection" ); - maxDateConnection = findDatabase( maxdatcon ); - maxDateTable = XMLHandler.getTagValue( infonode, "maxdate", "table" ); - maxDateField = XMLHandler.getTagValue( infonode, "maxdate", "field" ); - String offset = XMLHandler.getTagValue( infonode, "maxdate", "offset" ); - maxDateOffset = Const.toDouble( offset, 0.0 ); - String mdiff = XMLHandler.getTagValue( infonode, "maxdate", "maxdiff" ); - maxDateDifference = Const.toDouble( mdiff, 0.0 ); - - // Check the dependencies as far as dates are concerned... 
- // We calculate BEFORE we run the MAX of these dates - // If the date is larger then enddate, startdate is set to MIN_DATE - // - Node depsNode = XMLHandler.getSubNode( infonode, XML_TAG_DEPENDENCIES ); - int nrDeps = XMLHandler.countNodes( depsNode, TransDependency.XML_TAG ); + /** + * Gets the transformation filter extensions. For TransMeta, this method returns the value of + * {@link Const#STRING_TRANS_FILTER_EXT} + * + * @return the filter extensions + * @see org.pentaho.di.core.EngineMetaInterface#getFilterExtensions() + */ + public String[] getFilterExtensions() { + return Const.STRING_TRANS_FILTER_EXT; + } + + /** + * Gets the default extension for a transformation. For TransMeta, this method returns the value of + * {@link Const#STRING_TRANS_DEFAULT_EXT} + * + * @return the default extension + * @see org.pentaho.di.core.EngineMetaInterface#getDefaultExtension() + */ + public String getDefaultExtension() { + return Const.STRING_TRANS_DEFAULT_EXT; + } - for ( int i = 0; i < nrDeps; i++ ) { - Node depNode = XMLHandler.getSubNodeByNr( depsNode, TransDependency.XML_TAG, i ); + /** + * Gets the XML representation of this transformation. + * + * @return the XML representation of this transformation + * @throws KettleException if any errors occur during generation of the XML + * @see org.pentaho.di.core.xml.XMLInterface#getXML() + */ + public String getXML() throws KettleException { + return getXML(true, true, true, true, true); + } - TransDependency transDependency = new TransDependency( depNode, databases ); - if ( transDependency.getDatabase() != null && transDependency.getFieldname() != null ) { - addDependency( transDependency ); - } + /** + * Gets the XML representation of this transformation, including or excluding step, database, slave server, cluster, + * or partition information as specified by the parameters + * + * @param includeSteps whether to include step data + * @param includeDatabase whether to include database data + * @param includeSlaves whether to include slave server data + * @param includeClusters whether to include cluster data + * @param includePartitions whether to include partition data + * @return the XML representation of this transformation + * @throws KettleException if any errors occur during generation of the XML + */ + public String getXML(boolean includeSteps, boolean includeDatabase, boolean includeSlaves, boolean includeClusters, + boolean includePartitions) throws KettleException { + Props props = null; + if (Props.isInitialized()) { + props = Props.getInstance(); } - // Read the named parameters. 
- Node paramsNode = XMLHandler.getSubNode( infonode, XML_TAG_PARAMETERS ); - int nrParams = XMLHandler.countNodes( paramsNode, "parameter" ); + StringBuilder retval = new StringBuilder(800); + + retval.append(XMLHandler.openTag(XML_TAG)).append(Const.CR); - for ( int i = 0; i < nrParams; i++ ) { - Node paramNode = XMLHandler.getSubNodeByNr( paramsNode, "parameter", i ); + retval.append(" ").append(XMLHandler.openTag(XML_TAG_INFO)).append(Const.CR); - String paramName = XMLHandler.getTagValue( paramNode, "name" ); - String defaultValue = XMLHandler.getTagValue( paramNode, "default_value" ); - String descr = XMLHandler.getTagValue( paramNode, "description" ); + retval.append(" ").append(XMLHandler.addTagValue("name", name)); + retval.append(" ").append(XMLHandler.addTagValue("description", description)); + retval.append(" ").append(XMLHandler.addTagValue("extended_description", extendedDescription)); + retval.append(" ").append(XMLHandler.addTagValue("trans_version", trans_version)); + retval.append(" ").append(XMLHandler.addTagValue("trans_type", transformationType.getCode())); - addParameterDefinition( paramName, defaultValue, descr ); + if (trans_status >= 0) { + retval.append(" ").append(XMLHandler.addTagValue("trans_status", trans_status)); } + retval.append(" ").append(XMLHandler.addTagValue("directory", + directory != null ? directory.getPath() : RepositoryDirectory.DIRECTORY_SEPARATOR)); + + retval.append(" ").append(XMLHandler.openTag(XML_TAG_PARAMETERS)).append(Const.CR); + String[] parameters = listParameters(); + for (int idx = 0; idx < parameters.length; idx++) { + retval.append(" ").append(XMLHandler.openTag("parameter")).append(Const.CR); + retval.append(" ").append(XMLHandler.addTagValue("name", parameters[idx])); + retval.append(" ") + .append(XMLHandler.addTagValue("default_value", getParameterDefault(parameters[idx]))); + retval.append(" ") + .append(XMLHandler.addTagValue("description", getParameterDescription(parameters[idx]))); + retval.append(" ").append(XMLHandler.closeTag("parameter")).append(Const.CR); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_PARAMETERS)).append(Const.CR); - // Read the partitioning schemas - // - Node partSchemasNode = XMLHandler.getSubNode( infonode, XML_TAG_PARTITIONSCHEMAS ); - int nrPartSchemas = XMLHandler.countNodes( partSchemasNode, PartitionSchema.XML_TAG ); - for ( int i = 0; i < nrPartSchemas; i++ ) { - Node partSchemaNode = XMLHandler.getSubNodeByNr( partSchemasNode, PartitionSchema.XML_TAG, i ); - PartitionSchema partitionSchema = new PartitionSchema( partSchemaNode ); - - // Check if the step exists and if it's a shared step. - // If so, then we will keep the shared version, not this one. - // The stored XML is only for backup purposes. - // - PartitionSchema check = findPartitionSchema( partitionSchema.getName() ); - if ( check != null ) { - if ( !check.isShared() ) { - // we don't overwrite shared objects. 
- if ( shouldOverwrite( prompter, props, BaseMessages - .getString( PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName() ), - BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { - addOrReplacePartitionSchema( partitionSchema ); - } - } - } else { - partitionSchemas.add( partitionSchema ); - } - - } - - // Have all step partitioning meta-data reference the correct schemas that we just loaded + retval.append(" ").append(Const.CR); + + // Add the metadata for the various logging tables // - for ( int i = 0; i < nrSteps(); i++ ) { - StepPartitioningMeta stepPartitioningMeta = getStep( i ).getStepPartitioningMeta(); - if ( stepPartitioningMeta != null ) { - stepPartitioningMeta.setPartitionSchemaAfterLoading( partitionSchemas ); - } - StepPartitioningMeta targetStepPartitioningMeta = getStep( i ).getTargetStepPartitioningMeta(); - if ( targetStepPartitioningMeta != null ) { - targetStepPartitioningMeta.setPartitionSchemaAfterLoading( partitionSchemas ); - } - } - - // Read the slave servers... + retval.append(transLogTable.getXML()); + retval.append(performanceLogTable.getXML()); + retval.append(channelLogTable.getXML()); + retval.append(stepLogTable.getXML()); + retval.append(metricsLogTable.getXML()); + + retval.append(" ").append(Const.CR); + retval.append(" ").append(Const.CR); + retval.append(" ") + .append(XMLHandler.addTagValue("connection", maxDateConnection == null ? "" : maxDateConnection.getName())); + retval.append(" ").append(XMLHandler.addTagValue("table", maxDateTable)); + retval.append(" ").append(XMLHandler.addTagValue("field", maxDateField)); + retval.append(" ").append(XMLHandler.addTagValue("offset", maxDateOffset)); + retval.append(" ").append(XMLHandler.addTagValue("maxdiff", maxDateDifference)); + retval.append(" ").append(Const.CR); + + retval.append(" ").append(XMLHandler.addTagValue("size_rowset", sizeRowset)); + + retval.append(" ").append(XMLHandler.addTagValue("sleep_time_empty", sleepTimeEmpty)); + retval.append(" ").append(XMLHandler.addTagValue("sleep_time_full", sleepTimeFull)); + + retval.append(" ").append(XMLHandler.addTagValue("unique_connections", usingUniqueConnections)); + + retval.append(" ").append(XMLHandler.addTagValue("feedback_shown", feedbackShown)); + retval.append(" ").append(XMLHandler.addTagValue("feedback_size", feedbackSize)); + retval.append(" ").append(XMLHandler.addTagValue("using_thread_priorities", usingThreadPriorityManagment)); + retval.append(" ").append(XMLHandler.addTagValue("shared_objects_file", sharedObjectsFile)); + + // Performance monitoring // - Node slaveServersNode = XMLHandler.getSubNode( infonode, XML_TAG_SLAVESERVERS ); - int nrSlaveServers = XMLHandler.countNodes( slaveServersNode, SlaveServer.XML_TAG ); - for ( int i = 0; i < nrSlaveServers; i++ ) { - Node slaveServerNode = XMLHandler.getSubNodeByNr( slaveServersNode, SlaveServer.XML_TAG, i ); - SlaveServer slaveServer = new SlaveServer( slaveServerNode ); - slaveServer.shareVariablesWith( this ); - - // Check if the object exists and if it's a shared object. - // If so, then we will keep the shared version, not this one. - // The stored XML is only for backup purposes. - SlaveServer check = findSlaveServer( slaveServer.getName() ); - if ( check != null ) { - if ( !check.isShared() ) { - // we don't overwrite shared objects. 
- if ( shouldOverwrite( prompter, props, - BaseMessages.getString( PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName() ), - BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { - addOrReplaceSlaveServer( slaveServer ); - } - } - } else { - slaveServers.add( slaveServer ); - } - } - - // Read the cluster schemas + retval.append(" ") + .append(XMLHandler.addTagValue("capture_step_performance", capturingStepPerformanceSnapShots)); + retval.append(" ") + .append(XMLHandler.addTagValue("step_performance_capturing_delay", stepPerformanceCapturingDelay)); + retval.append(" ") + .append(XMLHandler.addTagValue("step_performance_capturing_size_limit", stepPerformanceCapturingSizeLimit)); + + retval.append(" ").append(XMLHandler.openTag(XML_TAG_DEPENDENCIES)).append(Const.CR); + for (int i = 0; i < nrDependencies(); i++) { + TransDependency td = getDependency(i); + retval.append(td.getXML()); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_DEPENDENCIES)).append(Const.CR); + + // The partitioning schemas... // - Node clusterSchemasNode = XMLHandler.getSubNode( infonode, XML_TAG_CLUSTERSCHEMAS ); - int nrClusterSchemas = XMLHandler.countNodes( clusterSchemasNode, ClusterSchema.XML_TAG ); - for ( int i = 0; i < nrClusterSchemas; i++ ) { - Node clusterSchemaNode = XMLHandler.getSubNodeByNr( clusterSchemasNode, ClusterSchema.XML_TAG, i ); - ClusterSchema clusterSchema = new ClusterSchema( clusterSchemaNode, slaveServers ); - clusterSchema.shareVariablesWith( this ); - - // Check if the object exists and if it's a shared object. - // If so, then we will keep the shared version, not this one. - // The stored XML is only for backup purposes. - ClusterSchema check = findClusterSchema( clusterSchema.getName() ); - if ( check != null ) { - if ( !check.isShared() ) { - // we don't overwrite shared objects. - if ( shouldOverwrite( prompter, props, - BaseMessages.getString( PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName() ), - BaseMessages.getString( PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage" ) ) ) { - addOrReplaceClusterSchema( clusterSchema ); - } - } - } else { - clusterSchemas.add( clusterSchema ); - } - } - - // Have all step clustering schema meta-data reference the correct cluster schemas that we just loaded + if (includePartitions) { + retval.append(" ").append(XMLHandler.openTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR); + for (int i = 0; i < partitionSchemas.size(); i++) { + PartitionSchema partitionSchema = partitionSchemas.get(i); + retval.append(partitionSchema.getXML()); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_PARTITIONSCHEMAS)).append(Const.CR); + } + // The slave servers... 
// - for ( int i = 0; i < nrSteps(); i++ ) { - getStep( i ).setClusterSchemaAfterLoading( clusterSchemas ); + if (includeSlaves) { + retval.append(" ").append(XMLHandler.openTag(XML_TAG_SLAVESERVERS)).append(Const.CR); + for (int i = 0; i < slaveServers.size(); i++) { + SlaveServer slaveServer = slaveServers.get(i); + retval.append(" ").append(slaveServer.getXML()).append(Const.CR); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_SLAVESERVERS)).append(Const.CR); } - String srowset = XMLHandler.getTagValue( infonode, "size_rowset" ); - sizeRowset = Const.toInt( srowset, Const.ROWS_IN_ROWSET ); - sleepTimeEmpty = - Const.toInt( XMLHandler.getTagValue( infonode, "sleep_time_empty" ), Const.TIMEOUT_GET_MILLIS ); - sleepTimeFull = Const.toInt( XMLHandler.getTagValue( infonode, "sleep_time_full" ), Const.TIMEOUT_PUT_MILLIS ); - usingUniqueConnections = "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "unique_connections" ) ); - - feedbackShown = !"N".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "feedback_shown" ) ); - feedbackSize = Const.toInt( XMLHandler.getTagValue( infonode, "feedback_size" ), Const.ROWS_UPDATE ); - usingThreadPriorityManagment = - !"N".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "using_thread_priorities" ) ); - - // Performance monitoring for steps... + // The cluster schemas... // - capturingStepPerformanceSnapShots = - "Y".equalsIgnoreCase( XMLHandler.getTagValue( infonode, "capture_step_performance" ) ); - stepPerformanceCapturingDelay = - Const.toLong( XMLHandler.getTagValue( infonode, "step_performance_capturing_delay" ), 1000 ); - stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue( infonode, "step_performance_capturing_size_limit" ); - - // Created user/date - createdUser = XMLHandler.getTagValue( infonode, "created_user" ); - String createDate = XMLHandler.getTagValue( infonode, "created_date" ); - if ( createDate != null ) { - createdDate = XMLHandler.stringToDate( createDate ); + if (includeClusters) { + retval.append(" ").append(XMLHandler.openTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR); + for (int i = 0; i < clusterSchemas.size(); i++) { + ClusterSchema clusterSchema = clusterSchemas.get(i); + retval.append(clusterSchema.getXML()); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_CLUSTERSCHEMAS)).append(Const.CR); } - // Changed user/date - modifiedUser = XMLHandler.getTagValue( infonode, "modified_user" ); - String modDate = XMLHandler.getTagValue( infonode, "modified_date" ); - if ( modDate != null ) { - modifiedDate = XMLHandler.stringToDate( modDate ); - } + retval.append(" ").append(XMLHandler.addTagValue("created_user", createdUser)); + retval.append(" ").append(XMLHandler.addTagValue("created_date", XMLHandler.date2string(createdDate))); + retval.append(" ").append(XMLHandler.addTagValue("modified_user", modifiedUser)); + retval.append(" ").append(XMLHandler.addTagValue("modified_date", XMLHandler.date2string(modifiedDate))); - Node partitionDistNode = XMLHandler.getSubNode( transnode, SlaveStepCopyPartitionDistribution.XML_TAG ); - if ( partitionDistNode != null ) { - slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution( partitionDistNode ); - } else { - slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); // leave empty + try { + retval.append(" ").append(XMLHandler.addTagValue("key_for_session_key", keyForSessionKey)); + } catch (Exception ex) { + log.logError("Unable to decode key", ex); } + retval.append(" 
").append(XMLHandler.addTagValue("is_key_private", isKeyPrivate)); - // Is this a slave transformation? - // - slaveTransformation = "Y".equalsIgnoreCase( XMLHandler.getTagValue( transnode, "slave_transformation" ) ); - if ( log.isDebug() ) { - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.NumberOfStepsReaded" ) + nrSteps() ); - log.logDebug( BaseMessages.getString( PKG, "TransMeta.Log.NumberOfHopsReaded" ) + nrTransHops() ); + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_INFO)).append(Const.CR); + + retval.append(" ").append(XMLHandler.openTag(XML_TAG_NOTEPADS)).append(Const.CR); + if (notes != null) { + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + retval.append(ni.getXML()); + } + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_NOTEPADS)).append(Const.CR); + + // The database connections... + if (includeDatabase) { + for (int i = 0; i < nrDatabases(); i++) { + DatabaseMeta dbMeta = getDatabase(i); + if (props != null && props.areOnlyUsedConnectionsSavedToXML()) { + if (isDatabaseConnectionUsed(dbMeta)) { + retval.append(dbMeta.getXML()); + } + } else { + retval.append(dbMeta.getXML()); + } + } } - sortSteps(); - // Load the attribute groups map - // - attributesMap = AttributesUtil.loadAttributes( XMLHandler.getSubNode( transnode, AttributesUtil.XML_TAG ) ); - - keyForSessionKey = XMLHandler.stringToBinary( XMLHandler.getTagValue( infonode, "key_for_session_key" ) ); - isKeyPrivate = "Y".equals( XMLHandler.getTagValue( infonode, "is_key_private" ) ); - - } catch ( KettleXMLException xe ) { - throw new KettleXMLException( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorReadingTransformation" ), - xe ); - } catch ( KettleException e ) { - throw new KettleXMLException( e ); - } finally { - initializeVariablesFrom( null ); - if ( setInternalVariables ) { - setInternalKettleVariables(); - } - - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationMetaLoaded.id, this ); - } - } catch ( Exception e ) { - // See if we have missing plugins to report, those take precedence! - // - if ( !missingPluginsException.getMissingPluginDetailsList().isEmpty() ) { - throw missingPluginsException; - } else { - throw new KettleXMLException( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorReadingTransformation" ), - e ); - } - } finally { - if ( !missingPluginsException.getMissingPluginDetailsList().isEmpty() ) { - throw missingPluginsException; - } - } - } - - public byte[] getKey() { - return keyForSessionKey; - } - - public void setKey( byte[] key ) { - this.keyForSessionKey = key; - } - - public boolean isPrivateKey() { - return isKeyPrivate; - } - - public void setPrivateKey( boolean privateKey ) { - this.isKeyPrivate = privateKey; - } - - /** - * Reads the shared objects (steps, connections, etc.). - * - * @return the shared objects - * @throws KettleException - * if any errors occur while reading the shared objects - */ - public SharedObjects readSharedObjects() throws KettleException { - // Extract the shared steps, connections, etc. using the SharedObjects class - // - String soFile = environmentSubstitute( sharedObjectsFile ); - SharedObjects sharedObjects = new SharedObjects( soFile ); - if ( sharedObjects.getObjectsMap().isEmpty() ) { - log.logDetailed( BaseMessages.getString( PKG, "TransMeta.Log.EmptySharedObjectsFile", soFile ) ); - } - - // First read the databases... - // We read databases & slaves first because there might be dependencies that need to be resolved. 
- // - for ( SharedObjectInterface object : sharedObjects.getObjectsMap().values() ) { - if ( object instanceof DatabaseMeta ) { - DatabaseMeta databaseMeta = (DatabaseMeta) object; - databaseMeta.shareVariablesWith( this ); - addOrReplaceDatabase( databaseMeta ); - } else if ( object instanceof SlaveServer ) { - SlaveServer slaveServer = (SlaveServer) object; - slaveServer.shareVariablesWith( this ); - addOrReplaceSlaveServer( slaveServer ); - } else if ( object instanceof StepMeta ) { - StepMeta stepMeta = (StepMeta) object; - addOrReplaceStep( stepMeta ); - } else if ( object instanceof PartitionSchema ) { - PartitionSchema partitionSchema = (PartitionSchema) object; - addOrReplacePartitionSchema( partitionSchema ); - } else if ( object instanceof ClusterSchema ) { - ClusterSchema clusterSchema = (ClusterSchema) object; - clusterSchema.shareVariablesWith( this ); - addOrReplaceClusterSchema( clusterSchema ); - } - } - - return sharedObjects; - } - - /** - * Gets a List of all the steps that are used in at least one active hop. These steps will be used to execute the - * transformation. The others will not be executed.
- * Update 3.0 : we also add those steps that are not linked to another hop, but have at least one remote input or - * output step defined. - * - * @param all - * true if you want to get ALL the steps from the transformation, false otherwise - * @return A List of steps - */ - public List getTransHopSteps( boolean all ) { - List st = new ArrayList(); - int idx; + if (includeSteps) { + retval.append(" ").append(XMLHandler.openTag(XML_TAG_ORDER)).append(Const.CR); + for (int i = 0; i < nrTransHops(); i++) { + TransHopMeta transHopMeta = getTransHop(i); + retval.append(transHopMeta.getXML()).append(Const.CR); + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_ORDER)).append(Const.CR); - for ( int x = 0; x < nrTransHops(); x++ ) { - TransHopMeta hi = getTransHop( x ); - if ( hi.isEnabled() || all ) { - idx = st.indexOf( hi.getFromStep() ); // FROM - if ( idx < 0 ) { - st.add( hi.getFromStep() ); - } + /* The steps... */ + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + if (stepMeta.getStepMetaInterface() instanceof HasRepositoryInterface) { + ((HasRepositoryInterface) stepMeta.getStepMetaInterface()).setRepository(repository); + } + retval.append(stepMeta.getXML()); + } - idx = st.indexOf( hi.getToStep() ); // TO - if ( idx < 0 ) { - st.add( hi.getToStep() ); - } - } - } + /* The error handling metadata on the steps */ + retval.append(" ").append(XMLHandler.openTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR); + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); - // Also, add the steps that need to be painted, but are not part of a hop - for ( int x = 0; x < nrSteps(); x++ ) { - StepMeta stepMeta = getStep( x ); - if ( stepMeta.isDrawn() && !isStepUsedInTransHops( stepMeta ) ) { - st.add( stepMeta ); - } - if ( !stepMeta.getRemoteInputSteps().isEmpty() || !stepMeta.getRemoteOutputSteps().isEmpty() ) { - if ( !st.contains( stepMeta ) ) { - st.add( stepMeta ); + if (stepMeta.getStepErrorMeta() != null) { + retval.append(stepMeta.getStepErrorMeta().getXML()); + } + } + retval.append(" ").append(XMLHandler.closeTag(XML_TAG_STEP_ERROR_HANDLING)).append(Const.CR); } - } - } - return st; - } + // The slave-step-copy/partition distribution. Only used for slave transformations in a clustering environment. + retval.append(" ").append(slaveStepCopyPartitionDistribution.getXML()); - /** - * Checks if a step has been used in a hop or not. - * - * @param stepMeta - * The step queried. - * @return true if a step is used in a hop (active or not), false otherwise - */ - public boolean isStepUsedInTransHops( StepMeta stepMeta ) { - TransHopMeta fr = findTransHopFrom( stepMeta ); - TransHopMeta to = findTransHopTo( stepMeta ); - if ( fr != null || to != null ) { - return true; - } - return false; - } + // Is this a slave transformation or not? + retval.append(" ").append(XMLHandler.addTagValue("slave_transformation", slaveTransformation)); - /** - * Clears the different changed flags of the transformation. 
- * - */ - @Override - public void clearChanged() { - changed_steps = false; - changed_hops = false; + // Also store the attribute groups + // + retval.append(AttributesUtil.getAttributesXml(attributesMap)).append(Const.CR); - for ( int i = 0; i < nrSteps(); i++ ) { - getStep( i ).setChanged( false ); - if ( getStep( i ).getStepPartitioningMeta() != null ) { - getStep( i ).getStepPartitioningMeta().hasChanged( false ); - } - } - for ( int i = 0; i < nrTransHops(); i++ ) { - getTransHop( i ).setChanged( false ); - } - for ( int i = 0; i < partitionSchemas.size(); i++ ) { - partitionSchemas.get( i ).setChanged( false ); + retval.append("").append(Const.CR); + + return retval.toString(); } - for ( int i = 0; i < clusterSchemas.size(); i++ ) { - clusterSchemas.get( i ).setChanged( false ); + + /** + * Parses a file containing the XML that describes the transformation. No default connections are loaded since no + * repository is available at this time. Since the filename is set, internal variables are being set that relate to + * this. + * + * @param fname The filename + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname) throws KettleXMLException, KettleMissingPluginsException { + this(fname, true); } - super.clearChanged(); - } + /** + * Parses a file containing the XML that describes the transformation. No default connections are loaded since no + * repository is available at this time. Since the filename is set, variables are set in the specified variable space + * that relate to this. + * + * @param fname The filename + * @param parentVariableSpace the parent variable space + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, VariableSpace parentVariableSpace) throws KettleXMLException, + KettleMissingPluginsException { + this(fname, null, true, parentVariableSpace); + } - /** - * Checks whether or not the steps have changed. - * - * @return true if the steps have been changed, false otherwise - */ - public boolean haveStepsChanged() { - if ( changed_steps ) { - return true; + /** + * Parses a file containing the XML that describes the transformation. No default connections are loaded since no + * repository is available at this time. + * + * @param fname The filename + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, boolean setInternalVariables) throws KettleXMLException, + KettleMissingPluginsException { + this(fname, null, setInternalVariables); } - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - if ( stepMeta.hasChanged() ) { - return true; - } - if ( stepMeta.getStepPartitioningMeta() != null && stepMeta.getStepPartitioningMeta().hasChanged() ) { - return true; - } + /** + * Parses a file containing the XML that describes the transformation. 
+ * + * @param fname The filename + * @param rep The repository to load the default set of connections from, null if no repository is available + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, Repository rep) throws KettleXMLException, KettleMissingPluginsException { + this(fname, rep, true); } - return false; - } - /** - * Checks whether or not any of the hops have been changed. - * - * @return true if a hop has been changed, false otherwise - */ - public boolean haveHopsChanged() { - if ( changed_hops ) { - return true; + /** + * Parses a file containing the XML that describes the transformation. + * + * @param fname The filename + * @param rep The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, Repository rep, boolean setInternalVariables) throws KettleXMLException, + KettleMissingPluginsException { + this(fname, rep, setInternalVariables, null); } - for ( int i = 0; i < nrTransHops(); i++ ) { - TransHopMeta hi = getTransHop( i ); - if ( hi.hasChanged() ) { - return true; - } + /** + * Parses a file containing the XML that describes the transformation. + * + * @param fname The filename + * @param rep The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace) throws KettleXMLException, KettleMissingPluginsException { + this(fname, rep, setInternalVariables, parentVariableSpace, null); } - return false; - } - /** - * Checks whether or not any of the partitioning schemas have been changed. - * - * @return true if the partitioning schemas have been changed, false otherwise - */ - public boolean havePartitionSchemasChanged() { - for ( int i = 0; i < partitionSchemas.size(); i++ ) { - PartitionSchema ps = partitionSchemas.get( i ); - if ( ps.hasChanged() ) { - return true; - } + /** + * Parses a file containing the XML that describes the transformation. 
+ * + * @param fname The filename + * @param rep The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @param prompter the changed/replace listener or null if there is none + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, + OverwritePrompter prompter) throws KettleXMLException, KettleMissingPluginsException { + this(fname, null, rep, setInternalVariables, parentVariableSpace, prompter); } - return false; - } + /** + * Parses a file containing the XML that describes the transformation. + * + * @param fname The filename + * @param metaStore the metadata store to reference (or null if there is none) + * @param rep The repository to load the default set of connections from, null if no repository is available + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @param prompter the changed/replace listener or null if there is none + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter) + throws KettleXMLException, KettleMissingPluginsException { + this.metaStore = metaStore; + this.repository = rep; + + // OK, try to load using the VFS stuff... + Document doc = null; + try { + doc = XMLHandler.loadXMLFile(KettleVFS.getFileObject(fname, parentVariableSpace)); + } catch (KettleFileException e) { + throw new KettleXMLException(BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname), e); + } + + if (doc != null) { + // Root node: + Node transnode = XMLHandler.getSubNode(doc, XML_TAG); - /** - * Checks whether or not any of the clustering schemas have been changed. - * - * @return true if the clustering schemas have been changed, false otherwise - */ - public boolean haveClusterSchemasChanged() { - for ( int i = 0; i < clusterSchemas.size(); i++ ) { - ClusterSchema cs = clusterSchemas.get( i ); - if ( cs.hasChanged() ) { - return true; - } - } + if (transnode == null) { + throw new KettleXMLException(BaseMessages.getString( + PKG, "TransMeta.Exception.NotValidTransformationXML", fname)); + } - return false; - } + // Load from this node... + loadXML(transnode, fname, metaStore, rep, setInternalVariables, parentVariableSpace, prompter); - /** - * Checks whether or not the transformation has changed. 
- * - * @return true if the transformation has changed, false otherwise - */ - @Override - public boolean hasChanged() { - if ( super.hasChanged() ) { - return true; - } - if ( haveStepsChanged() ) { - return true; - } - if ( haveHopsChanged() ) { - return true; - } - if ( havePartitionSchemasChanged() ) { - return true; - } - if ( haveClusterSchemasChanged() ) { - return true; + } else { + throw new KettleXMLException(BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", fname)); + } } - return false; - } - - /** - * See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the - * previous steps. If you keep going backward and find the step, there is a loop. Both the informational and the - * normal steps need to be checked for loops! - * - * @param stepMeta - * The step position to start looking - * - * @return true if a loop has been found, false if no loop is found. - */ - public boolean hasLoop( StepMeta stepMeta ) { - clearLoopCache(); - return hasLoop( stepMeta, null, true ) || hasLoop( stepMeta, null, false ); - } - - /** - * See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the - * previous steps. If you keep going backward and find the original step again, there is a loop. - * - * @param stepMeta - * The step position to start looking - * @param lookup - * The original step when wandering around the transformation. - * @param info - * Check the informational steps or not. - * - * @return true if a loop has been found, false if no loop is found. - */ - private boolean hasLoop( StepMeta stepMeta, StepMeta lookup, boolean info ) { - String - cacheKey = - stepMeta.getName() + " - " + ( lookup != null ? lookup.getName() : "" ) + " - " + ( info ? "true" : "false" ); - Boolean loop = loopCache.get( cacheKey ); - if ( loop != null ) { - return loop.booleanValue(); - } - - boolean hasLoop = false; - - int nr = findNrPrevSteps( stepMeta, info ); - for ( int i = 0; i < nr && !hasLoop; i++ ) { - StepMeta prevStepMeta = findPrevStep( stepMeta, i, info ); - if ( prevStepMeta != null ) { - if ( prevStepMeta.equals( stepMeta ) ) { - hasLoop = true; - break; // no need to check more but caching this one below - } else if ( prevStepMeta.equals( lookup ) ) { - hasLoop = true; - break; // no need to check more but caching this one below - } else if ( hasLoop( prevStepMeta, lookup == null ? stepMeta : lookup, info ) ) { - hasLoop = true; - break; // no need to check more but caching this one below - } - } - } - - // Store in the cache... - // - loopCache.put( cacheKey, Boolean.valueOf( hasLoop ) ); - - return hasLoop; - } - - /** - * Mark all steps in the transformation as selected. - * - */ - public void selectAll() { - int i; - for ( i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - stepMeta.setSelected( true ); - } - for ( i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - ni.setSelected( true ); + /** + * Instantiates a new transformation meta-data object. 
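[Editor's illustration, not part of the patch] A hedged usage sketch for the file-based constructors shown above; the path and variable names are hypothetical. Loading a .ktr file goes through KettleVFS and loadXML, with internal variables set from the filename by default:

        // Illustrative only: parse a transformation definition from a file.
        // "/tmp/example.ktr" is a made-up path; with no repository, database
        // connections come from the XML itself.
        // (Throws KettleXMLException / KettleMissingPluginsException.)
        TransMeta transMeta = new TransMeta("/tmp/example.ktr");

        // Overloads accept an IMetaStore, a Repository, a parent VariableSpace
        // and an OverwritePrompter when those are available, e.g.
        //   new TransMeta(fname, metaStore, rep, true, parentVariableSpace, prompter);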
+ * + * @param xmlStream the XML input stream from which to read the transformation definition + * @param rep the repository + * @param setInternalVariables whether to set internal variables as a result of the creation + * @param parentVariableSpace the parent variable space + * @param prompter a GUI component that will prompt the user if the new transformation will overwrite an existing one + * @throws KettleXMLException if any errors occur during parsing of the specified stream + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(InputStream xmlStream, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter) + throws KettleXMLException, KettleMissingPluginsException { + Document doc = XMLHandler.loadXMLFile(xmlStream, null, false, false); + Node transnode = XMLHandler.getSubNode(doc, XML_TAG); + loadXML(transnode, rep, setInternalVariables, parentVariableSpace, prompter); } - setChanged(); - notifyObservers( "refreshGraph" ); - } - - /** - * Clear the selection of all steps. - * - */ - public void unselectAll() { - int i; - for ( i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - stepMeta.setSelected( false ); - } - for ( i = 0; i < nrNotes(); i++ ) { - NotePadMeta ni = getNote( i ); - ni.setSelected( false ); + /** + * Parse a file containing the XML that describes the transformation. Specify a repository to load default list of + * database connections from and to reference in mappings etc. + * + * @param transnode The XML node to load from + * @param rep the repository to reference. + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public TransMeta(Node transnode, Repository rep) throws KettleXMLException, KettleMissingPluginsException { + loadXML(transnode, rep, false); } - } - /** - * Get an array of all the selected step locations. - * - * @return The selected step locations. - */ - public Point[] getSelectedStepLocations() { - List points = new ArrayList(); - - for ( StepMeta stepMeta : getSelectedSteps() ) { - Point p = stepMeta.getLocation(); - points.add( new Point( p.x, p.y ) ); // explicit copy of location + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode The XML node to load from + * @param rep The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML(Node transnode, Repository rep, boolean setInternalVariables) throws KettleXMLException, + KettleMissingPluginsException { + loadXML(transnode, rep, setInternalVariables, null); } - return points.toArray( new Point[points.size()] ); - } + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. 
+ * + * @param transnode The XML node to load from + * @param rep The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML(Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace) + throws KettleXMLException, KettleMissingPluginsException { + loadXML(transnode, rep, setInternalVariables, parentVariableSpace, null); + } - /** - * Get an array of all the selected note locations. - * - * @return The selected note locations. - */ - public Point[] getSelectedNoteLocations() { - List points = new ArrayList(); + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode The XML node to load from + * @param rep The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @param prompter the changed/replace listener or null if there is none + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML(Node transnode, Repository rep, boolean setInternalVariables, VariableSpace parentVariableSpace, + OverwritePrompter prompter) throws KettleXMLException, KettleMissingPluginsException { + loadXML(transnode, null, rep, setInternalVariables, parentVariableSpace, prompter); + } - for ( NotePadMeta ni : getSelectedNotes() ) { - Point p = ni.getLocation(); - points.add( new Point( p.x, p.y ) ); // explicit copy of location + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. + * + * @param transnode The XML node to load from + * @param fname The filename + * @param rep The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @param prompter the changed/replace listener or null if there is none + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML(Node transnode, String fname, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter) + throws KettleXMLException, KettleMissingPluginsException { + loadXML(transnode, fname, null, rep, setInternalVariables, parentVariableSpace, prompter); } - return points.toArray( new Point[points.size()] ); - } + /** + * Parses an XML DOM (starting at the specified Node) that describes the transformation. 
+ * + * @param transnode The XML node to load from + * @param fname The filename + * @param rep The repository to load the default list of database connections from (null if no repository is available) + * @param setInternalVariables true if you want to set the internal variables based on this transformation information + * @param parentVariableSpace the parent variable space to use during TransMeta construction + * @param prompter the changed/replace listener or null if there is none + * @throws KettleXMLException if any errors occur during parsing of the specified file + * @throws KettleMissingPluginsException in case missing plugins were found (details are in the exception in that case) + */ + public void loadXML(Node transnode, String fname, IMetaStore metaStore, Repository rep, boolean setInternalVariables, + VariableSpace parentVariableSpace, OverwritePrompter prompter) + throws KettleXMLException, KettleMissingPluginsException { - /** - * Gets a list of the selected steps. - * - * @return A list of all the selected steps. - */ - public List getSelectedSteps() { - List selection = new ArrayList(); - for ( StepMeta stepMeta : steps ) { - if ( stepMeta.isSelected() ) { - selection.add( stepMeta ); - } + KettleMissingPluginsException + missingPluginsException = + new KettleMissingPluginsException( + BaseMessages.getString(PKG, "TransMeta.MissingPluginsFoundWhileLoadingTransformation.Exception")); - } - return selection; - } + this.metaStore = metaStore; // Remember this as the primary meta store. - /** - * Gets an array of all the selected step names. - * - * @return An array of all the selected step names. - */ - public String[] getSelectedStepNames() { - List selection = getSelectedSteps(); - String[] retval = new String[selection.size()]; - for ( int i = 0; i < retval.length; i++ ) { - StepMeta stepMeta = selection.get( i ); - retval[i] = stepMeta.getName(); - } - return retval; - } - - /** - * Gets an array of the locations of an array of steps. - * - * @param steps - * An array of steps - * @return an array of the locations of an array of steps - */ - public int[] getStepIndexes( List steps ) { - int[] retval = new int[steps.size()]; + try { - for ( int i = 0; i < steps.size(); i++ ) { - retval[i] = indexOfStep( steps.get( i ) ); - } + Props props = null; + if (Props.isInitialized()) { + props = Props.getInstance(); + } - return retval; - } + initializeVariablesFrom(parentVariableSpace); - /** - * Gets the maximum size of the canvas by calculating the maximum location of a step. - * - * @return Maximum coordinate of a step in the transformation + (100,100) for safety. - */ - public Point getMaximum() { - int maxx = 0, maxy = 0; - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - Point loc = stepMeta.getLocation(); - if ( loc.x > maxx ) { - maxx = loc.x; - } - if ( loc.y > maxy ) { - maxy = loc.y; - } - } - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta notePadMeta = getNote( i ); - Point loc = notePadMeta.getLocation(); - if ( loc.x + notePadMeta.width > maxx ) { - maxx = loc.x + notePadMeta.width; - } - if ( loc.y + notePadMeta.height > maxy ) { - maxy = loc.y + notePadMeta.height; - } - } - - return new Point( maxx + 100, maxy + 100 ); - } - - /** - * Gets the minimum point on the canvas of a transformation. 
- * - * @return Minimum coordinate of a step in the transformation - */ - public Point getMinimum() { - int minx = Integer.MAX_VALUE, miny = Integer.MAX_VALUE; - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - Point loc = stepMeta.getLocation(); - if ( loc.x < minx ) { - minx = loc.x; - } - if ( loc.y < miny ) { - miny = loc.y; - } - } - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta notePadMeta = getNote( i ); - Point loc = notePadMeta.getLocation(); - if ( loc.x < minx ) { - minx = loc.x; - } - if ( loc.y < miny ) { - miny = loc.y; - } - } - - if ( minx > BORDER_INDENT && minx != Integer.MAX_VALUE ) { - minx -= BORDER_INDENT; - } else { - minx = 0; - } - if ( miny > BORDER_INDENT && miny != Integer.MAX_VALUE ) { - miny -= BORDER_INDENT; - } else { - miny = 0; - } - - return new Point( minx, miny ); - } - - /** - * Gets the names of all the steps. - * - * @return An array of step names. - */ - public String[] getStepNames() { - String[] retval = new String[nrSteps()]; + try { + // Clear the transformation + clear(); - for ( int i = 0; i < nrSteps(); i++ ) { - retval[i] = getStep( i ).getName(); - } + // If we are not using a repository, we are getting the transformation from a file + // Set the filename here so it can be used in variables for ALL aspects of the transformation FIX: PDI-8890 + if (null == rep) { + setFilename(fname); + } - return retval; - } + // Read all the database connections from the repository to make sure that we don't overwrite any there by + // loading from XML. + // + try { + sharedObjectsFile = XMLHandler.getTagValue(transnode, "info", "shared_objects_file"); + sharedObjects = rep != null ? rep.readTransSharedObjects(this) : readSharedObjects(); + } catch (Exception e) { + log + .logError(BaseMessages.getString(PKG, "TransMeta.ErrorReadingSharedObjects.Message", e.toString())); + log.logError(Const.getStackTracker(e)); + } - /** - * Gets all the steps as an array. - * - * @return An array of all the steps in the transformation. - */ - public StepMeta[] getStepsArray() { - StepMeta[] retval = new StepMeta[nrSteps()]; + // Load the database connections, slave servers, cluster schemas & partition schemas into this object. + // + importFromMetaStore(); - for ( int i = 0; i < nrSteps(); i++ ) { - retval[i] = getStep( i ); - } + // Handle connections + int n = XMLHandler.countNodes(transnode, DatabaseMeta.XML_TAG); + Set privateTransformationDatabases = new HashSet(n); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveConnections", String.valueOf(n))); + } + for (int i = 0; i < n; i++) { + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtConnection") + i); + } + Node nodecon = XMLHandler.getSubNodeByNr(transnode, DatabaseMeta.XML_TAG, i); + + DatabaseMeta dbcon = new DatabaseMeta(nodecon); + dbcon.shareVariablesWith(this); + if (!dbcon.isShared()) { + privateTransformationDatabases.add(dbcon.getName()); + } + + DatabaseMeta exist = findDatabase(dbcon.getName()); + if (exist == null) { + addDatabase(dbcon); + } else { + if (!exist.isShared()) // otherwise, we just keep the shared connection. 
+ { + if (shouldOverwrite(prompter, props, BaseMessages.getString(PKG, + "TransMeta.Message.OverwriteConnectionYN", dbcon.getName()), BaseMessages.getString(PKG, + "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) { + int idx = indexOfDatabase(exist); + removeDatabase(idx); + addDatabase(idx, dbcon); + } + } + } + } + setPrivateDatabases(privateTransformationDatabases); + + // Read the notes... + Node notepadsnode = XMLHandler.getSubNode(transnode, XML_TAG_NOTEPADS); + int nrnotes = XMLHandler.countNodes(notepadsnode, NotePadMeta.XML_TAG); + for (int i = 0; i < nrnotes; i++) { + Node notepadnode = XMLHandler.getSubNodeByNr(notepadsnode, NotePadMeta.XML_TAG, i); + NotePadMeta ni = new NotePadMeta(notepadnode); + notes.add(ni); + } - return retval; - } + // Handle Steps + int s = XMLHandler.countNodes(transnode, StepMeta.XML_TAG); - /** - * Looks in the transformation to find a step in a previous location starting somewhere. - * - * @param startStep - * The starting step - * @param stepToFind - * The step to look for backward in the transformation - * @return true if we can find the step in an earlier location in the transformation. - */ - public boolean findPrevious( StepMeta startStep, StepMeta stepToFind ) { - String key = startStep.getName() + " - " + stepToFind.getName(); - Boolean result = loopCache.get( key ); - if ( result != null ) { - return result; - } - - // Normal steps - // - List previousSteps = findPreviousSteps( startStep, false ); - for ( int i = 0; i < previousSteps.size(); i++ ) { - StepMeta stepMeta = previousSteps.get( i ); - if ( stepMeta.equals( stepToFind ) ) { - loopCache.put( key, true ); - return true; - } - - boolean found = findPrevious( stepMeta, stepToFind ); // Look further back in the tree. - if ( found ) { - loopCache.put( key, true ); - return true; - } - } - - // Info steps - List infoSteps = findPreviousSteps( startStep, true ); - for ( int i = 0; i < infoSteps.size(); i++ ) { - StepMeta stepMeta = infoSteps.get( i ); - if ( stepMeta.equals( stepToFind ) ) { - loopCache.put( key, true ); - return true; - } - - boolean found = findPrevious( stepMeta, stepToFind ); // Look further back in the tree. - if ( found ) { - loopCache.put( key, true ); - return true; - } - } - - loopCache.put( key, false ); - return false; - } - - /** - * Puts the steps in alphabetical order. - */ - public void sortSteps() { - try { - Collections.sort( steps ); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "TransMeta.Exception.ErrorOfSortingSteps" ) + e ); - log.logError( Const.getStackTracker( e ) ); - } - } + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.ReadingSteps") + s + " steps..."); + } + for (int i = 0; i < s; i++) { + Node stepnode = XMLHandler.getSubNodeByNr(transnode, StepMeta.XML_TAG, i); + + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtStep") + i); + } + + StepMeta stepMeta = new StepMeta(stepnode, databases, metaStore); + stepMeta.setParentTransMeta(this); // for tracing, retain hierarchy + + if (stepMeta.isMissing()) { + addMissingTrans((MissingTrans) stepMeta.getStepMetaInterface()); + } + // Check if the step exists and if it's a shared step. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. 
+ // + StepMeta check = findStep(stepMeta.getName()); + if (check != null) { + if (!check.isShared()) { + // Don't overwrite shared objects + + addOrReplaceStep(stepMeta); + } else { + check.setDraw(stepMeta.isDrawn()); // Just keep the drawn flag and location + check.setLocation(stepMeta.getLocation()); + } + } else { + addStep(stepMeta); // simply add it. + } + } - /** - * Sorts all the hops in the transformation. - */ - public void sortHops() { - Collections.sort( hops ); - } + // Read the error handling code of the steps... + // + Node errorHandlingNode = XMLHandler.getSubNode(transnode, XML_TAG_STEP_ERROR_HANDLING); + int nrErrorHandlers = XMLHandler.countNodes(errorHandlingNode, StepErrorMeta.XML_TAG); + for (int i = 0; i < nrErrorHandlers; i++) { + Node stepErrorMetaNode = XMLHandler.getSubNodeByNr(errorHandlingNode, StepErrorMeta.XML_TAG, i); + StepErrorMeta stepErrorMeta = new StepErrorMeta(this, stepErrorMetaNode, steps); + if (stepErrorMeta.getSourceStep() != null) { + stepErrorMeta.getSourceStep().setStepErrorMeta(stepErrorMeta); // a bit of a trick, I know. + } + } - /** The previous count. */ - private long prevCount; + // Have all StreamValueLookups, etc. reference the correct source steps... + // + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + StepMetaInterface sii = stepMeta.getStepMetaInterface(); + if (sii != null) { + sii.searchInfoAndTargetSteps(steps); + } + } - /** - * Puts the steps in a more natural order: from start to finish. For the moment, we ignore splits and joins. Splits - * and joins can't be listed sequentially in any case! - * - * @return a map containing all the previous steps per step - */ - public Map> sortStepsNatural() { - long startTime = System.currentTimeMillis(); - - prevCount = 0; - - // First create a map where all the previous steps of another step are kept... - // - final Map> stepMap = new HashMap>(); - - // Also cache the previous steps - // - final Map> previousCache = new HashMap>(); - - // Cache calculation of steps before another - // - Map> beforeCache = new HashMap>(); - - for ( StepMeta stepMeta : steps ) { - // What are the previous steps? (cached version for performance) - // - List prevSteps = previousCache.get( stepMeta ); - if ( prevSteps == null ) { - prevSteps = findPreviousSteps( stepMeta ); - prevCount++; - previousCache.put( stepMeta, prevSteps ); - } - - // Now get the previous steps recursively, store them in the step map - // - for ( StepMeta prev : prevSteps ) { - Map beforePrevMap = updateFillStepMap( previousCache, beforeCache, stepMeta, prev ); - stepMap.put( stepMeta, beforePrevMap ); - - // Store it also in the beforeCache... 
- // - beforeCache.put( prev, beforePrevMap ); - } - } + // Handle Hops + // + Node ordernode = XMLHandler.getSubNode(transnode, XML_TAG_ORDER); + n = XMLHandler.countNodes(ordernode, TransHopMeta.XML_TAG); - Collections.sort( steps, new Comparator() { + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.WeHaveHops") + n + " hops..."); + } + for (int i = 0; i < n; i++) { + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.LookingAtHop") + i); + } + Node hopnode = XMLHandler.getSubNodeByNr(ordernode, TransHopMeta.XML_TAG, i); + + TransHopMeta hopinf = new TransHopMeta(hopnode, steps); + addTransHop(hopinf); + } - public int compare( StepMeta o1, StepMeta o2 ) { + // + // get transformation info: + // + Node infonode = XMLHandler.getSubNode(transnode, XML_TAG_INFO); + + // Name + // + setName(XMLHandler.getTagValue(infonode, "name")); + + // description + // + description = XMLHandler.getTagValue(infonode, "description"); + + // extended description + // + extendedDescription = XMLHandler.getTagValue(infonode, "extended_description"); + + // trans version + // + trans_version = XMLHandler.getTagValue(infonode, "trans_version"); + + // trans status + // + trans_status = Const.toInt(XMLHandler.getTagValue(infonode, "trans_status"), -1); + + String transTypeCode = XMLHandler.getTagValue(infonode, "trans_type"); + transformationType = TransformationType.getTransformationTypeByCode(transTypeCode); + + // Optionally load the repository directory... + // + if (rep != null) { + String directoryPath = XMLHandler.getTagValue(infonode, "directory"); + if (directoryPath != null) { + directory = rep.findDirectory(directoryPath); + if (directory == null) { // not found + directory = new RepositoryDirectory(); // The root as default + } + } + } - Map beforeMap = stepMap.get( o1 ); - if ( beforeMap != null ) { - if ( beforeMap.get( o2 ) == null ) { - return -1; - } else { - return 1; - } - } else { - return o1.getName().compareToIgnoreCase( o2.getName() ); - } - } - } ); + // Read logging table information + // + Node logNode = XMLHandler.getSubNode(infonode, "log"); + if (logNode != null) { + + // Backward compatibility... 
+ // + Node transLogNode = XMLHandler.getSubNode(logNode, TransLogTable.XML_TAG); + if (transLogNode == null) { + // Load the XML + // + transLogTable.findField(TransLogTable.ID.LINES_READ) + .setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "read"))); + transLogTable.findField(TransLogTable.ID.LINES_WRITTEN) + .setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "write"))); + transLogTable.findField(TransLogTable.ID.LINES_INPUT) + .setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "input"))); + transLogTable.findField(TransLogTable.ID.LINES_OUTPUT) + .setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "output"))); + transLogTable.findField(TransLogTable.ID.LINES_UPDATED) + .setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "update"))); + transLogTable.findField(TransLogTable.ID.LINES_REJECTED) + .setSubject(findStep(XMLHandler.getTagValue(infonode, "log", "rejected"))); + + transLogTable.setConnectionName(XMLHandler.getTagValue(infonode, "log", "connection")); + transLogTable.setSchemaName(XMLHandler.getTagValue(infonode, "log", "schema")); + transLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "table")); + transLogTable.findField(TransLogTable.ID.ID_BATCH) + .setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "use_batchid"))); + transLogTable.findField(TransLogTable.ID.LOG_FIELD) + .setEnabled("Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "log", "USE_LOGFIELD"))); + transLogTable.setLogSizeLimit(XMLHandler.getTagValue(infonode, "log", "size_limit_lines")); + transLogTable.setLogInterval(XMLHandler.getTagValue(infonode, "log", "interval")); + transLogTable.findField(TransLogTable.ID.CHANNEL_ID).setEnabled(false); + transLogTable.findField(TransLogTable.ID.LINES_REJECTED).setEnabled(false); + performanceLogTable.setConnectionName(transLogTable.getConnectionName()); + performanceLogTable.setTableName(XMLHandler.getTagValue(infonode, "log", "step_performance_table")); + } else { + transLogTable.loadXML(transLogNode, databases, steps); + } + Node perfLogNode = XMLHandler.getSubNode(logNode, PerformanceLogTable.XML_TAG); + if (perfLogNode != null) { + performanceLogTable.loadXML(perfLogNode, databases, steps); + } + Node channelLogNode = XMLHandler.getSubNode(logNode, ChannelLogTable.XML_TAG); + if (channelLogNode != null) { + channelLogTable.loadXML(channelLogNode, databases, steps); + } + Node stepLogNode = XMLHandler.getSubNode(logNode, StepLogTable.XML_TAG); + if (stepLogNode != null) { + stepLogTable.loadXML(stepLogNode, databases, steps); + } + Node metricsLogNode = XMLHandler.getSubNode(logNode, MetricsLogTable.XML_TAG); + if (metricsLogNode != null) { + metricsLogTable.loadXML(metricsLogNode, databases, steps); + } + } - long endTime = System.currentTimeMillis(); - log.logBasic( - BaseMessages.getString( PKG, "TransMeta.Log.TimeExecutionStepSort", ( endTime - startTime ), prevCount ) ); + // Maxdate range options... + String maxdatcon = XMLHandler.getTagValue(infonode, "maxdate", "connection"); + maxDateConnection = findDatabase(maxdatcon); + maxDateTable = XMLHandler.getTagValue(infonode, "maxdate", "table"); + maxDateField = XMLHandler.getTagValue(infonode, "maxdate", "field"); + String offset = XMLHandler.getTagValue(infonode, "maxdate", "offset"); + maxDateOffset = Const.toDouble(offset, 0.0); + String mdiff = XMLHandler.getTagValue(infonode, "maxdate", "maxdiff"); + maxDateDifference = Const.toDouble(mdiff, 0.0); + + // Check the dependencies as far as dates are concerned... 
+ // We calculate BEFORE we run the MAX of these dates + // If the date is larger then enddate, startdate is set to MIN_DATE + // + Node depsNode = XMLHandler.getSubNode(infonode, XML_TAG_DEPENDENCIES); + int nrDeps = XMLHandler.countNodes(depsNode, TransDependency.XML_TAG); + + for (int i = 0; i < nrDeps; i++) { + Node depNode = XMLHandler.getSubNodeByNr(depsNode, TransDependency.XML_TAG, i); + + TransDependency transDependency = new TransDependency(depNode, databases); + if (transDependency.getDatabase() != null && transDependency.getFieldname() != null) { + addDependency(transDependency); + } + } - return stepMap; - } + // Read the named parameters. + Node paramsNode = XMLHandler.getSubNode(infonode, XML_TAG_PARAMETERS); + int nrParams = XMLHandler.countNodes(paramsNode, "parameter"); - /** - * Fills a map with all steps previous to the given step. This method uses a caching technique, so if a map is - * provided that contains the specified previous step, it is immediately returned to avoid unnecessary processing. - * Otherwise, the previous steps are determined and added to the map recursively, and a cache is constructed for later - * use. - * - * @param previousCache - * the previous cache, must be non-null - * @param beforeCache - * the before cache, must be non-null - * @param originStepMeta - * the origin step meta - * @param previousStepMeta - * the previous step meta - * @return the map - */ - private Map updateFillStepMap( Map> previousCache, - Map> beforeCache, StepMeta originStepMeta, StepMeta previousStepMeta ) { + for (int i = 0; i < nrParams; i++) { + Node paramNode = XMLHandler.getSubNodeByNr(paramsNode, "parameter", i); - // See if we have a hash map to store step occurrence (located before the step) - // - Map beforeMap = beforeCache.get( previousStepMeta ); - if ( beforeMap == null ) { - beforeMap = new HashMap(); - } else { - return beforeMap; // Nothing left to do here! - } + String paramName = XMLHandler.getTagValue(paramNode, "name"); + String defaultValue = XMLHandler.getTagValue(paramNode, "default_value"); + String descr = XMLHandler.getTagValue(paramNode, "description"); - // Store the current previous step in the map - // - beforeMap.put( previousStepMeta, Boolean.TRUE ); + addParameterDefinition(paramName, defaultValue, descr); + } - // Figure out all the previous steps as well, they all need to go in there... - // - List prevSteps = previousCache.get( previousStepMeta ); - if ( prevSteps == null ) { - prevSteps = findPreviousSteps( previousStepMeta ); - prevCount++; - previousCache.put( previousStepMeta, prevSteps ); - } + // Read the partitioning schemas + // + Node partSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_PARTITIONSCHEMAS); + int nrPartSchemas = XMLHandler.countNodes(partSchemasNode, PartitionSchema.XML_TAG); + for (int i = 0; i < nrPartSchemas; i++) { + Node partSchemaNode = XMLHandler.getSubNodeByNr(partSchemasNode, PartitionSchema.XML_TAG, i); + PartitionSchema partitionSchema = new PartitionSchema(partSchemaNode); + + // Check if the step exists and if it's a shared step. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + // + PartitionSchema check = findPartitionSchema(partitionSchema.getName()); + if (check != null) { + if (!check.isShared()) { + // we don't overwrite shared objects. 
+ if (shouldOverwrite(prompter, props, BaseMessages + .getString(PKG, "TransMeta.Message.OverwritePartitionSchemaYN", partitionSchema.getName()), + BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) { + addOrReplacePartitionSchema(partitionSchema); + } + } + } else { + partitionSchemas.add(partitionSchema); + } - // Now, get the previous steps for stepMeta recursively... - // We only do this when the beforeMap is not known yet... - // - for ( StepMeta prev : prevSteps ) { - Map beforePrevMap = updateFillStepMap( previousCache, beforeCache, originStepMeta, prev ); + } - // Keep a copy in the cache... - // - beforeCache.put( prev, beforePrevMap ); + // Have all step partitioning meta-data reference the correct schemas that we just loaded + // + for (int i = 0; i < nrSteps(); i++) { + StepPartitioningMeta stepPartitioningMeta = getStep(i).getStepPartitioningMeta(); + if (stepPartitioningMeta != null) { + stepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas); + } + StepPartitioningMeta targetStepPartitioningMeta = getStep(i).getTargetStepPartitioningMeta(); + if (targetStepPartitioningMeta != null) { + targetStepPartitioningMeta.setPartitionSchemaAfterLoading(partitionSchemas); + } + } - // Also add it to the new map for this step... - // - beforeMap.putAll( beforePrevMap ); - } + // Read the slave servers... + // + Node slaveServersNode = XMLHandler.getSubNode(infonode, XML_TAG_SLAVESERVERS); + int nrSlaveServers = XMLHandler.countNodes(slaveServersNode, SlaveServer.XML_TAG); + for (int i = 0; i < nrSlaveServers; i++) { + Node slaveServerNode = XMLHandler.getSubNodeByNr(slaveServersNode, SlaveServer.XML_TAG, i); + SlaveServer slaveServer = new SlaveServer(slaveServerNode); + slaveServer.shareVariablesWith(this); + + // Check if the object exists and if it's a shared object. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + SlaveServer check = findSlaveServer(slaveServer.getName()); + if (check != null) { + if (!check.isShared()) { + // we don't overwrite shared objects. + if (shouldOverwrite(prompter, props, + BaseMessages.getString(PKG, "TransMeta.Message.OverwriteSlaveServerYN", slaveServer.getName()), + BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) { + addOrReplaceSlaveServer(slaveServer); + } + } + } else { + slaveServers.add(slaveServer); + } + } - return beforeMap; - } + // Read the cluster schemas + // + Node clusterSchemasNode = XMLHandler.getSubNode(infonode, XML_TAG_CLUSTERSCHEMAS); + int nrClusterSchemas = XMLHandler.countNodes(clusterSchemasNode, ClusterSchema.XML_TAG); + for (int i = 0; i < nrClusterSchemas; i++) { + Node clusterSchemaNode = XMLHandler.getSubNodeByNr(clusterSchemasNode, ClusterSchema.XML_TAG, i); + ClusterSchema clusterSchema = new ClusterSchema(clusterSchemaNode, slaveServers); + clusterSchema.shareVariablesWith(this); + + // Check if the object exists and if it's a shared object. + // If so, then we will keep the shared version, not this one. + // The stored XML is only for backup purposes. + ClusterSchema check = findClusterSchema(clusterSchema.getName()); + if (check != null) { + if (!check.isShared()) { + // we don't overwrite shared objects. 
+ if (shouldOverwrite(prompter, props, + BaseMessages.getString(PKG, "TransMeta.Message.OverwriteClusterSchemaYN", clusterSchema.getName()), + BaseMessages.getString(PKG, "TransMeta.Message.OverwriteConnection.DontShowAnyMoreMessage"))) { + addOrReplaceClusterSchema(clusterSchema); + } + } + } else { + clusterSchemas.add(clusterSchema); + } + } - /** - * Sorts the hops in a natural way: from beginning to end. - */ - public void sortHopsNatural() { - // Loop over the hops... - for ( int j = 0; j < nrTransHops(); j++ ) { - // Buble sort: we need to do this several times... - for ( int i = 0; i < nrTransHops() - 1; i++ ) { - TransHopMeta one = getTransHop( i ); - TransHopMeta two = getTransHop( i + 1 ); + // Have all step clustering schema meta-data reference the correct cluster schemas that we just loaded + // + for (int i = 0; i < nrSteps(); i++) { + getStep(i).setClusterSchemaAfterLoading(clusterSchemas); + } - StepMeta a = two.getFromStep(); - StepMeta b = one.getToStep(); + String srowset = XMLHandler.getTagValue(infonode, "size_rowset"); + sizeRowset = Const.toInt(srowset, Const.ROWS_IN_ROWSET); + sleepTimeEmpty = + Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_empty"), Const.TIMEOUT_GET_MILLIS); + sleepTimeFull = Const.toInt(XMLHandler.getTagValue(infonode, "sleep_time_full"), Const.TIMEOUT_PUT_MILLIS); + usingUniqueConnections = "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "unique_connections")); + + feedbackShown = !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "feedback_shown")); + feedbackSize = Const.toInt(XMLHandler.getTagValue(infonode, "feedback_size"), Const.ROWS_UPDATE); + usingThreadPriorityManagment = + !"N".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "using_thread_priorities")); + + // Performance monitoring for steps... + // + capturingStepPerformanceSnapShots = + "Y".equalsIgnoreCase(XMLHandler.getTagValue(infonode, "capture_step_performance")); + stepPerformanceCapturingDelay = + Const.toLong(XMLHandler.getTagValue(infonode, "step_performance_capturing_delay"), 1000); + stepPerformanceCapturingSizeLimit = XMLHandler.getTagValue(infonode, "step_performance_capturing_size_limit"); + + // Created user/date + createdUser = XMLHandler.getTagValue(infonode, "created_user"); + String createDate = XMLHandler.getTagValue(infonode, "created_date"); + if (createDate != null) { + createdDate = XMLHandler.stringToDate(createDate); + } - if ( !findPrevious( a, b ) && !a.equals( b ) ) { - setTransHop( i + 1, one ); - setTransHop( i, two ); - } - } - } - } + // Changed user/date + modifiedUser = XMLHandler.getTagValue(infonode, "modified_user"); + String modDate = XMLHandler.getTagValue(infonode, "modified_date"); + if (modDate != null) { + modifiedDate = XMLHandler.stringToDate(modDate); + } - /** - * Determines the impact of the different steps in a transformation on databases, tables and field. - * - * @param impact - * An ArrayList of DatabaseImpact objects. 
- * @param monitor - * a progress monitor listener to be updated as the transformation is analyzed - * @throws KettleStepException - * if any errors occur during analysis - */ - public void analyseImpact( List impact, ProgressMonitorListener monitor ) throws KettleStepException { - if ( monitor != null ) { - monitor - .beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.DeterminingImpactTask.Title" ), nrSteps() ); - } - boolean stop = false; - for ( int i = 0; i < nrSteps() && !stop; i++ ) { - if ( monitor != null ) { - monitor.subTask( - BaseMessages.getString( PKG, "TransMeta.Monitor.LookingAtStepTask.Title" ) + ( i + 1 ) + "/" + nrSteps() ); - } - StepMeta stepMeta = getStep( i ); - - RowMetaInterface prev = getPrevStepFields( stepMeta ); - StepMetaInterface stepint = stepMeta.getStepMetaInterface(); - RowMetaInterface inform = null; - StepMeta[] lu = getInfoStep( stepMeta ); - if ( lu != null ) { - inform = getStepFields( lu ); - } else { - inform = stepint.getTableFields(); - } - - compatibleAnalyseImpactStep( impact, stepint, this, stepMeta, prev, inform ); - stepint.analyseImpact( impact, this, stepMeta, prev, null, null, inform, repository, metaStore ); - - if ( monitor != null ) { - monitor.worked( 1 ); - stop = monitor.isCanceled(); - } - } - - if ( monitor != null ) { - monitor.done(); - } - } - - @SuppressWarnings( "deprecation" ) - private void compatibleAnalyseImpactStep( List impact, StepMetaInterface stepint, TransMeta transMeta, - StepMeta stepMeta, RowMetaInterface prev, RowMetaInterface inform ) throws KettleStepException { - stepint.analyseImpact( impact, transMeta, stepMeta, prev, null, null, inform ); - } - - /** - * Proposes an alternative stepname when the original already exists. - * - * @param stepname - * The stepname to find an alternative for - * @return The suggested alternative stepname. - */ - public String getAlternativeStepname( String stepname ) { - String newname = stepname; - StepMeta stepMeta = findStep( newname ); - int nr = 1; - while ( stepMeta != null ) { - nr++; - newname = stepname + " " + nr; - stepMeta = findStep( newname ); - } + Node partitionDistNode = XMLHandler.getSubNode(transnode, SlaveStepCopyPartitionDistribution.XML_TAG); + if (partitionDistNode != null) { + slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(partitionDistNode); + } else { + slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution(); // leave empty + } + + // Is this a slave transformation? + // + slaveTransformation = "Y".equalsIgnoreCase(XMLHandler.getTagValue(transnode, "slave_transformation")); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfStepsReaded") + nrSteps()); + log.logDebug(BaseMessages.getString(PKG, "TransMeta.Log.NumberOfHopsReaded") + nrTransHops()); + } + sortSteps(); - return newname; - } + // Load the attribute groups map + // + attributesMap = AttributesUtil.loadAttributes(XMLHandler.getSubNode(transnode, AttributesUtil.XML_TAG)); - /** - * Builds a list of all the SQL statements that this transformation needs in order to work properly. - * - * @return An ArrayList of SQLStatement objects. 
- * @throws KettleStepException - * if any errors occur during SQL statement generation - */ - public List getSQLStatements() throws KettleStepException { - return getSQLStatements( null ); - } + keyForSessionKey = XMLHandler.stringToBinary(XMLHandler.getTagValue(infonode, "key_for_session_key")); + isKeyPrivate = "Y".equals(XMLHandler.getTagValue(infonode, "is_key_private")); - /** - * Builds a list of all the SQL statements that this transformation needs in order to work properly. - * - * @param monitor - * a progress monitor listener to be updated as the SQL statements are generated - * @return An ArrayList of SQLStatement objects. - * @throws KettleStepException - * if any errors occur during SQL statement generation - */ - public List getSQLStatements( ProgressMonitorListener monitor ) throws KettleStepException { - if ( monitor != null ) { - monitor.beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title" ), nrSteps() + 1 ); - } - List stats = new ArrayList(); - - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - if ( monitor != null ) { - monitor.subTask( - BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForStepTask.Title", "" + stepMeta ) ); - } - RowMetaInterface prev = getPrevStepFields( stepMeta ); - SQLStatement sqlCompat = compatibleStepMetaGetSQLStatements( stepMeta.getStepMetaInterface(), stepMeta, prev ); - if ( sqlCompat.getSQL() != null || sqlCompat.hasError() ) { - stats.add( sqlCompat ); - } - SQLStatement - sql = - stepMeta.getStepMetaInterface().getSQLStatements( this, stepMeta, prev, repository, metaStore ); - if ( sql.getSQL() != null || sql.hasError() ) { - stats.add( sql ); - } - if ( monitor != null ) { - monitor.worked( 1 ); - } - } - - // Also check the sql for the logtable... 
- // - if ( monitor != null ) { - monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title2" ) ); - } - if ( transLogTable.getDatabaseMeta() != null && ( !Const.isEmpty( transLogTable.getTableName() ) || !Const - .isEmpty( performanceLogTable.getTableName() ) ) ) { - try { - for ( LogTableInterface logTable : new LogTableInterface[] { transLogTable, performanceLogTable, - channelLogTable, stepLogTable, } ) { - if ( logTable.getDatabaseMeta() != null && !Const.isEmpty( logTable.getTableName() ) ) { - - Database db = null; - try { - db = new Database( this, transLogTable.getDatabaseMeta() ); - db.shareVariablesWith( this ); - db.connect(); - - RowMetaInterface fields = logTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); - String - schemaTable = - logTable.getDatabaseMeta() - .getQuotedSchemaTableCombination( logTable.getSchemaName(), logTable.getTableName() ); - String sql = db.getDDL( schemaTable, fields ); - if ( !Const.isEmpty( sql ) ) { - SQLStatement stat = new SQLStatement( "", transLogTable.getDatabaseMeta(), sql ); - stats.add( stat ); - } - } catch ( Exception e ) { - throw new KettleDatabaseException( - "Unable to connect to logging database [" + logTable.getDatabaseMeta() + "]", e ); + } catch (KettleXMLException xe) { + throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), + xe); + } catch (KettleException e) { + throw new KettleXMLException(e); } finally { - if ( db != null ) { - db.disconnect(); - } + initializeVariablesFrom(null); + if (setInternalVariables) { + setInternalKettleVariables(); + } + + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.TransformationMetaLoaded.id, this); + } + } catch (Exception e) { + // See if we have missing plugins to report, those take precedence! + // + if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) { + throw missingPluginsException; + } else { + throw new KettleXMLException(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorReadingTransformation"), + e); + } + } finally { + if (!missingPluginsException.getMissingPluginDetailsList().isEmpty()) { + throw missingPluginsException; } - } } - } catch ( KettleDatabaseException dbe ) { - SQLStatement stat = new SQLStatement( "", transLogTable.getDatabaseMeta(), null ); - stat.setError( - BaseMessages.getString( PKG, "TransMeta.SQLStatement.ErrorDesc.ErrorObtainingTransformationLogTableInfo" ) - + dbe.getMessage() ); - stats.add( stat ); - } } - if ( monitor != null ) { - monitor.worked( 1 ); - } - if ( monitor != null ) { - monitor.done(); + + public byte[] getKey() { + return keyForSessionKey; } - return stats; - } + public void setKey(byte[] key) { + this.keyForSessionKey = key; + } - @SuppressWarnings( "deprecation" ) - private SQLStatement compatibleStepMetaGetSQLStatements( StepMetaInterface stepMetaInterface, StepMeta stepMeta, - RowMetaInterface prev ) throws KettleStepException { - return stepMetaInterface.getSQLStatements( this, stepMeta, prev ); - } + public boolean isPrivateKey() { + return isKeyPrivate; + } - /** - * Get the SQL statements (needed to run this transformation) as a single String. 
- * - * @return the SQL statements needed to run this transformation - * @throws KettleStepException - * if any errors occur during SQL statement generation - */ - public String getSQLStatementsString() throws KettleStepException { - String sql = ""; - List stats = getSQLStatements(); - for ( int i = 0; i < stats.size(); i++ ) { - SQLStatement stat = stats.get( i ); - if ( !stat.hasError() && stat.hasSQL() ) { - sql += stat.getSQL(); - } + public void setPrivateKey(boolean privateKey) { + this.isKeyPrivate = privateKey; } - return sql; - } + /** + * Reads the shared objects (steps, connections, etc.). + * + * @return the shared objects + * @throws KettleException if any errors occur while reading the shared objects + */ + public SharedObjects readSharedObjects() throws KettleException { + // Extract the shared steps, connections, etc. using the SharedObjects class + // + String soFile = environmentSubstitute(sharedObjectsFile); + SharedObjects sharedObjects = new SharedObjects(soFile); + if (sharedObjects.getObjectsMap().isEmpty()) { + log.logDetailed(BaseMessages.getString(PKG, "TransMeta.Log.EmptySharedObjectsFile", soFile)); + } - /** - * Checks all the steps and fills a List of (CheckResult) remarks. - * - * @param remarks - * The remarks list to add to. - * @param only_selected - * true to check only the selected steps, false for all steps - * @param monitor - * a progress monitor listener to be updated as the SQL statements are generated - */ - @Deprecated - public void checkSteps( List remarks, boolean only_selected, ProgressMonitorListener monitor ) { - checkSteps( remarks, only_selected, monitor, this, null, null ); - } + // First read the databases... + // We read databases & slaves first because there might be dependencies that need to be resolved. + // + for (SharedObjectInterface object : sharedObjects.getObjectsMap().values()) { + if (object instanceof DatabaseMeta) { + DatabaseMeta databaseMeta = (DatabaseMeta) object; + databaseMeta.shareVariablesWith(this); + addOrReplaceDatabase(databaseMeta); + } else if (object instanceof SlaveServer) { + SlaveServer slaveServer = (SlaveServer) object; + slaveServer.shareVariablesWith(this); + addOrReplaceSlaveServer(slaveServer); + } else if (object instanceof StepMeta) { + StepMeta stepMeta = (StepMeta) object; + addOrReplaceStep(stepMeta); + } else if (object instanceof PartitionSchema) { + PartitionSchema partitionSchema = (PartitionSchema) object; + addOrReplacePartitionSchema(partitionSchema); + } else if (object instanceof ClusterSchema) { + ClusterSchema clusterSchema = (ClusterSchema) object; + clusterSchema.shareVariablesWith(this); + addOrReplaceClusterSchema(clusterSchema); + } + } - /** - * Checks all the steps and fills a List of (CheckResult) remarks. - * - * @param remarks - * The remarks list to add to. - * @param only_selected - * true to check only the selected steps, false for all steps - * @param monitor - * a progress monitor listener to be updated as the SQL statements are generated - */ - public void checkSteps( List remarks, boolean only_selected, ProgressMonitorListener monitor, - VariableSpace space, Repository repository, IMetaStore metaStore ) { - try { - remarks.clear(); // Start with a clean slate... 
- - Map values = new Hashtable(); - String[] stepnames; - StepMeta[] steps; - List selectedSteps = getSelectedSteps(); - if ( !only_selected || selectedSteps.isEmpty() ) { - stepnames = getStepNames(); - steps = getStepsArray(); - } else { - stepnames = getSelectedStepNames(); - steps = selectedSteps.toArray( new StepMeta[selectedSteps.size()] ); - } - - ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.BeforeCheckSteps.id, - new CheckStepsExtension( remarks, space, this, steps, repository, metaStore ) ); - - boolean stop_checking = false; - - if ( monitor != null ) { - monitor.beginTask( BaseMessages.getString( PKG, "TransMeta.Monitor.VerifyingThisTransformationTask.Title" ), - steps.length + 2 ); - } - - for ( int i = 0; i < steps.length && !stop_checking; i++ ) { - if ( monitor != null ) { - monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.VerifyingStepTask.Title", stepnames[i] ) ); - } - - StepMeta stepMeta = steps[i]; - - int nrinfo = findNrInfoSteps( stepMeta ); - StepMeta[] infostep = null; - if ( nrinfo > 0 ) { - infostep = getInfoStep( stepMeta ); - } - - RowMetaInterface info = null; - if ( infostep != null ) { - try { - info = getStepFields( infostep ); - } catch ( KettleStepException kse ) { - info = null; - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, - "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingStepInfoFields.Description", - "" + stepMeta, Const.CR + kse.getMessage() ), stepMeta ); - remarks.add( cr ); - } - } - - // The previous fields from non-informative steps: - RowMetaInterface prev = null; - try { - prev = getPrevStepFields( stepMeta ); - } catch ( KettleStepException kse ) { - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingInputFields.Description", - "" + stepMeta, Const.CR + kse.getMessage() ), stepMeta ); - remarks.add( cr ); - // This is a severe error: stop checking... - // Otherwise we wind up checking time & time again because nothing gets put in the database - // cache, the timeout of certain databases is very long... (Oracle) - stop_checking = true; - } - - if ( isStepUsedInTransHops( stepMeta ) ) { - // Get the input & output steps! - // Copy to arrays: - String[] input = getPrevStepNames( stepMeta ); - String[] output = getNextStepNames( stepMeta ); - - // Check step specific info... - ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.BeforeCheckStep.id, - new CheckStepsExtension( remarks, space, this, new StepMeta[] { stepMeta }, repository, metaStore ) ); - stepMeta.check( remarks, this, prev, input, output, info, space, repository, metaStore ); - ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.AfterCheckStep.id, - new CheckStepsExtension( remarks, space, this, new StepMeta[] { stepMeta }, repository, metaStore ) ); - - // See if illegal characters etc. were used in field-names... 
- if ( prev != null ) { - for ( int x = 0; x < prev.size(); x++ ) { - ValueMetaInterface v = prev.getValueMeta( x ); - String name = v.getName(); - if ( name == null ) { - values.put( v, - BaseMessages.getString( PKG, "TransMeta.Value.CheckingFieldName.FieldNameIsEmpty.Description" ) ); - } else if ( name.indexOf( ' ' ) >= 0 ) { - values.put( v, BaseMessages - .getString( PKG, "TransMeta.Value.CheckingFieldName.FieldNameContainsSpaces.Description" ) ); - } else { - char[] list = - new char[] { '.', ',', '-', '/', '+', '*', '\'', '\t', '"', '|', '@', '(', ')', '{', '}', '!', - '^' }; - for ( int c = 0; c < list.length; c++ ) { - if ( name.indexOf( list[c] ) >= 0 ) { - values.put( v, BaseMessages.getString( PKG, - "TransMeta.Value.CheckingFieldName.FieldNameContainsUnfriendlyCodes.Description", - String.valueOf( list[c] ) ) ); - } + return sharedObjects; + } + + /** + * Gets a List of all the steps that are used in at least one active hop. These steps will be used to execute the + * transformation. The others will not be executed.
+ * Update 3.0 : we also add those steps that are not linked to another hop, but have at least one remote input or + * output step defined. + * + * @param all true if you want to get ALL the steps from the transformation, false otherwise + * @return A List of steps + */ + public List getTransHopSteps(boolean all) { + List st = new ArrayList(); + int idx; + + for (int x = 0; x < nrTransHops(); x++) { + TransHopMeta hi = getTransHop(x); + if (hi.isEnabled() || all) { + idx = st.indexOf(hi.getFromStep()); // FROM + if (idx < 0) { + st.add(hi.getFromStep()); } - } - } - - // Check if 2 steps with the same name are entering the step... - if ( prev.size() > 1 ) { - String[] fieldNames = prev.getFieldNames(); - String[] sortedNames = Const.sortStrings( fieldNames ); - - String prevName = sortedNames[0]; - for ( int x = 1; x < sortedNames.length; x++ ) { - // Checking for doubles - if ( prevName.equalsIgnoreCase( sortedNames[x] ) ) { - // Give a warning!! - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultWarning.HaveTheSameNameField.Description", - prevName ), stepMeta ); - remarks.add( cr ); - } else { - prevName = sortedNames[x]; + + idx = st.indexOf(hi.getToStep()); // TO + if (idx < 0) { + st.add(hi.getToStep()); } - } - } - } else { - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultError.CannotFindPreviousFields.Description" ) - + stepMeta.getName(), stepMeta ); - remarks.add( cr ); - } - } else { - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, - BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultWarning.StepIsNotUsed.Description" ), - stepMeta ); - remarks.add( cr ); + } } - // Also check for mixing rows... - try { - checkRowMixingStatically( stepMeta, null ); - } catch ( KettleRowException e ) { - CheckResult cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, e.getMessage(), stepMeta ); - remarks.add( cr ); - } - - if ( monitor != null ) { - monitor.worked( 1 ); // progress bar... - if ( monitor.isCanceled() ) { - stop_checking = true; - } - } - } - - // Also, check the logging table of the transformation... 
- if ( monitor == null || !monitor.isCanceled() ) { - if ( monitor != null ) { - monitor.subTask( BaseMessages.getString( PKG, "TransMeta.Monitor.CheckingTheLoggingTableTask.Title" ) ); - } - if ( transLogTable.getDatabaseMeta() != null ) { - Database logdb = new Database( this, transLogTable.getDatabaseMeta() ); - logdb.shareVariablesWith( this ); - try { - logdb.connect(); - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, - BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.ConnectingWorks.Description" ), - null ); - remarks.add( cr ); - - if ( transLogTable.getTableName() != null ) { - if ( logdb.checkTableExists( transLogTable.getTableName() ) ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultOK.LoggingTableExists.Description", - transLogTable.getTableName() ), null ); - remarks.add( cr ); - - RowMetaInterface fields = transLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta(); - String sql = logdb.getDDL( transLogTable.getTableName(), fields ); - if ( sql == null || sql.length() == 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, - BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.CorrectLayout.Description" ), - null ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( PKG, - "TransMeta.CheckResult.TypeResultError.LoggingTableNeedsAdjustments.Description" ) + Const.CR - + sql, null ); - remarks.add( cr ); + // Also, add the steps that need to be painted, but are not part of a hop + for (int x = 0; x < nrSteps(); x++) { + StepMeta stepMeta = getStep(x); + if (stepMeta.isDrawn() && !isStepUsedInTransHops(stepMeta)) { + st.add(stepMeta); + } + if (!stepMeta.getRemoteInputSteps().isEmpty() || !stepMeta.getRemoteOutputSteps().isEmpty()) { + if (!st.contains(stepMeta)) { + st.add(stepMeta); } + } + } - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultError.LoggingTableDoesNotExist.Description" ), - null ); - remarks.add( cr ); - } - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultError.LogTableNotSpecified.Description" ), - null ); - remarks.add( cr ); - } - } catch ( KettleDatabaseException dbe ) { - // Ignore errors - } finally { - logdb.disconnect(); - } - } - if ( monitor != null ) { - monitor.worked( 1 ); - } - - } - - if ( monitor != null ) { - monitor.subTask( BaseMessages - .getString( PKG, "TransMeta.Monitor.CheckingForDatabaseUnfriendlyCharactersInFieldNamesTask.Title" ) ); - } - if ( values.size() > 0 ) { - for ( ValueMetaInterface v : values.keySet() ) { - String message = values.get( v ); - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages - .getString( PKG, "TransMeta.CheckResult.TypeResultWarning.Description", v.getName(), message, - v.getOrigin() ), findStep( v.getOrigin() ) ); - remarks.add( cr ); - } - } else { - CheckResult - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, - BaseMessages.getString( PKG, "TransMeta.CheckResult.TypeResultOK.Description" ), null ); - remarks.add( cr ); - } - if ( monitor != null ) { - monitor.worked( 1 ); - } - ExtensionPointHandler.callExtensionPoint( getLogChannel(), KettleExtensionPoint.AfterCheckSteps.id, - new CheckStepsExtension( remarks, space, this, steps, 
repository, metaStore ) ); - } catch ( Exception e ) { - log.logError( Const.getStackTracker( e ) ); - throw new RuntimeException( e ); - } - - } - - /** - * Gets the result rows. - * - * @return a list containing the result rows. - * @deprecated Moved to Trans to make this class stateless - */ - @Deprecated - public List getResultRows() { - return resultRows; - } - - /** - * Sets the list of result rows. - * - * @param resultRows - * The list of result rows to set. - * @deprecated Moved to Trans to make this class stateless - */ - @Deprecated - public void setResultRows( List resultRows ) { - this.resultRows = resultRows; - } - - /** - * Gets the repository directory path and name of the transformation. - * - * @return The repository directory path plus the name of the transformation - */ - public String getPathAndName() { - if ( getRepositoryDirectory().isRoot() ) { - return getRepositoryDirectory().getPath() + getName(); - } else { - return getRepositoryDirectory().getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + getName(); + return st; } - } - - /** - * Gets the arguments used for this transformation. - * - * @return an array of String arguments for the transformation - * @deprecated moved to Trans - */ - @Deprecated - public String[] getArguments() { - return arguments; - } - - /** - * Sets the arguments used for this transformation. - * - * @param arguments - * The arguments to set. - * @deprecated moved to Trans - */ - @Deprecated - public void setArguments( String[] arguments ) { - this.arguments = arguments; - } - /** - * Gets the counters (database sequence values, e.g.) for the transformation. - * - * @return a named table of counters. - * @deprecated moved to Trans - */ - @Deprecated - public Hashtable getCounters() { - return counters; - } + /** + * Checks if a step has been used in a hop or not. + * + * @param stepMeta The step queried. + * @return true if a step is used in a hop (active or not), false otherwise + */ + public boolean isStepUsedInTransHops(StepMeta stepMeta) { + TransHopMeta fr = findTransHopFrom(stepMeta); + TransHopMeta to = findTransHopTo(stepMeta); + if (fr != null || to != null) { + return true; + } + return false; + } - /** - * Sets the counters (database sequence values, e.g.) for the transformation. - * - * @param counters - * The counters to set. - * @deprecated moved to Trans - */ - @Deprecated - public void setCounters( Hashtable counters ) { - this.counters = counters; - } + /** + * Clears the different changed flags of the transformation. + */ + @Override + public void clearChanged() { + changed_steps = false; + changed_hops = false; + + for (int i = 0; i < nrSteps(); i++) { + getStep(i).setChanged(false); + if (getStep(i).getStepPartitioningMeta() != null) { + getStep(i).getStepPartitioningMeta().hasChanged(false); + } + } + for (int i = 0; i < nrTransHops(); i++) { + getTransHop(i).setChanged(false); + } + for (int i = 0; i < partitionSchemas.size(); i++) { + partitionSchemas.get(i).setChanged(false); + } + for (int i = 0; i < clusterSchemas.size(); i++) { + clusterSchemas.get(i).setChanged(false); + } - /** - * Gets a list of dependencies for the transformation - * - * @return a list of the dependencies for the transformation - */ - public List getDependencies() { - return dependencies; - } + super.clearChanged(); + } - /** - * Sets the dependencies for the transformation. - * - * @param dependencies - * The dependency list to set. 
- */ - public void setDependencies( List dependencies ) { - this.dependencies = dependencies; - } - - /** - * Gets the database connection associated with "max date" processing. The connection, along with a specified table - * and field, allows for the filtering of the number of rows to process in a transformation by time, such as only - * processing the rows/records since the last time the transformation ran correctly. This can be used for auditing and - * throttling data during warehousing operations. - * - * @return Returns the meta-data associated with the most recent database connection. - */ - public DatabaseMeta getMaxDateConnection() { - return maxDateConnection; - } + /** + * Checks whether or not the steps have changed. + * + * @return true if the steps have been changed, false otherwise + */ + public boolean haveStepsChanged() { + if (changed_steps) { + return true; + } - /** - * Sets the database connection associated with "max date" processing. - * - * @param maxDateConnection - * the database meta-data to set - * @see #getMaxDateConnection() - */ - public void setMaxDateConnection( DatabaseMeta maxDateConnection ) { - this.maxDateConnection = maxDateConnection; - } + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + if (stepMeta.hasChanged()) { + return true; + } + if (stepMeta.getStepPartitioningMeta() != null && stepMeta.getStepPartitioningMeta().hasChanged()) { + return true; + } + } + return false; + } - /** - * Gets the maximum date difference between start and end dates for row/record processing. This can be used for - * auditing and throttling data during warehousing operations. - * - * @return the maximum date difference - */ - public double getMaxDateDifference() { - return maxDateDifference; - } + /** + * Checks whether or not any of the hops have been changed. + * + * @return true if a hop has been changed, false otherwise + */ + public boolean haveHopsChanged() { + if (changed_hops) { + return true; + } - /** - * Sets the maximum date difference between start and end dates for row/record processing. - * - * @param maxDateDifference - * The date difference to set. - * @see #getMaxDateDifference() - */ - public void setMaxDateDifference( double maxDateDifference ) { - this.maxDateDifference = maxDateDifference; - } - - /** - * Gets the date field associated with "max date" processing. This allows for the filtering of the number of rows to - * process in a transformation by time, such as only processing the rows/records since the last time the - * transformation ran correctly. This can be used for auditing and throttling data during warehousing operations. - * - * @return a string representing the date for the most recent database connection. - * @see #getMaxDateConnection() - */ - public String getMaxDateField() { - return maxDateField; - } + for (int i = 0; i < nrTransHops(); i++) { + TransHopMeta hi = getTransHop(i); + if (hi.hasChanged()) { + return true; + } + } + return false; + } - /** - * Sets the date field associated with "max date" processing. - * - * @param maxDateField - * The date field to set. - * @see #getMaxDateField() - */ - public void setMaxDateField( String maxDateField ) { - this.maxDateField = maxDateField; - } - - /** - * Gets the amount by which to increase the "max date" difference. This is used in "max date" processing, and can be - * used to provide more fine-grained control of the date range. 
For example, if the end date specifies a minute for - * which the data is not complete, you can "roll-back" the end date by one minute by - * - * @return Returns the maxDateOffset. - * @see #setMaxDateOffset(double) - */ - public double getMaxDateOffset() { - return maxDateOffset; - } - - /** - * Sets the amount by which to increase the end date in "max date" processing. This can be used to provide more - * fine-grained control of the date range. For example, if the end date specifies a minute for which the data is not - * complete, you can "roll-back" the end date by one minute by setting the offset to -60. - * - * @param maxDateOffset - * The maxDateOffset to set. - */ - public void setMaxDateOffset( double maxDateOffset ) { - this.maxDateOffset = maxDateOffset; - } - - /** - * Gets the database table providing a date to be used in "max date" processing. This allows for the filtering of the - * number of rows to process in a transformation by time, such as only processing the rows/records since the last time - * the transformation ran correctly. - * - * @return Returns the maxDateTable. - * @see #getMaxDateConnection() - */ - public String getMaxDateTable() { - return maxDateTable; - } + /** + * Checks whether or not any of the partitioning schemas have been changed. + * + * @return true if the partitioning schemas have been changed, false otherwise + */ + public boolean havePartitionSchemasChanged() { + for (int i = 0; i < partitionSchemas.size(); i++) { + PartitionSchema ps = partitionSchemas.get(i); + if (ps.hasChanged()) { + return true; + } + } - /** - * Sets the table name associated with "max date" processing. - * - * @param maxDateTable - * The maxDateTable to set. - * @see #getMaxDateTable() - */ - public void setMaxDateTable( String maxDateTable ) { - this.maxDateTable = maxDateTable; - } + return false; + } - /** - * Gets the size of the rowsets. - * - * @return Returns the size of the rowsets. - */ - public int getSizeRowset() { - String rowSetSize = getVariable( Const.KETTLE_TRANS_ROWSET_SIZE ); - int altSize = Const.toInt( rowSetSize, 0 ); - if ( altSize > 0 ) { - return altSize; - } else { - return sizeRowset; - } - } - - /** - * Sets the size of the rowsets. This method allows you to change the size of the buffers between the connected steps - * in a transformation. NOTE: Do not change this parameter unless you are running low on memory, for example. - * - * @param sizeRowset - * The sizeRowset to set. - */ - public void setSizeRowset( int sizeRowset ) { - this.sizeRowset = sizeRowset; - } + /** + * Checks whether or not any of the clustering schemas have been changed. + * + * @return true if the clustering schemas have been changed, false otherwise + */ + public boolean haveClusterSchemasChanged() { + for (int i = 0; i < clusterSchemas.size(); i++) { + ClusterSchema cs = clusterSchemas.get(i); + if (cs.hasChanged()) { + return true; + } + } - /** - * Gets the database cache object. - * - * @return the database cache object. - */ - public DBCache getDbCache() { - return dbCache; - } + return false; + } - /** - * Sets the database cache object. - * - * @param dbCache - * the database cache object to set - */ - public void setDbCache( DBCache dbCache ) { - this.dbCache = dbCache; - } - - /** - * Gets the version of the transformation. - * - * @return The version of the transformation - */ - public String getTransversion() { - return trans_version; - } - - /** - * Sets the version of the transformation. 
- * - * @param n - * The new version description of the transformation - */ - public void setTransversion( String n ) { - trans_version = n; - } + /** + * Checks whether or not the transformation has changed. + * + * @return true if the transformation has changed, false otherwise + */ + @Override + public boolean hasChanged() { + if (super.hasChanged()) { + return true; + } + if (haveStepsChanged()) { + return true; + } + if (haveHopsChanged()) { + return true; + } + if (havePartitionSchemasChanged()) { + return true; + } + if (haveClusterSchemasChanged()) { + return true; + } - /** - * Sets the status of the transformation. - * - * @param n - * The new status description of the transformation - */ - public void setTransstatus( int n ) { - trans_status = n; - } + return false; + } - /** - * Gets the status of the transformation. - * - * @return The status of the transformation - */ - public int getTransstatus() { - return trans_status; - } + /** + * See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the + * previous steps. If you keep going backward and find the step, there is a loop. Both the informational and the + * normal steps need to be checked for loops! + * + * @param stepMeta The step position to start looking + * @return true if a loop has been found, false if no loop is found. + */ + public boolean hasLoop(StepMeta stepMeta) { + clearLoopCache(); + return hasLoop(stepMeta, null, true) || hasLoop(stepMeta, null, false); + } - /** - * Gets a textual representation of the transformation. If its name has been set, it will be returned, otherwise the - * classname is returned. - * - * @return the textual representation of the transformation. - */ - @Override - public String toString() { - if ( !Const.isEmpty( filename ) ) { - if ( Const.isEmpty( name ) ) { - return filename; - } else { - return filename + " : " + name; - } - } - - if ( name != null ) { - if ( directory != null ) { - String path = directory.getPath(); - if ( path.endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) ) { - return path + name; - } else { - return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name; + /** + * See if there are any loops in the transformation, starting at the indicated step. This works by looking at all the + * previous steps. If you keep going backward and find the original step again, there is a loop. + * + * @param stepMeta The step position to start looking + * @param lookup The original step when wandering around the transformation. + * @param info Check the informational steps or not. + * @return true if a loop has been found, false if no loop is found. + */ + private boolean hasLoop(StepMeta stepMeta, StepMeta lookup, boolean info) { + String + cacheKey = + stepMeta.getName() + " - " + (lookup != null ? lookup.getName() : "") + " - " + (info ? "true" : "false"); + Boolean loop = loopCache.get(cacheKey); + if (loop != null) { + return loop.booleanValue(); } - } else { - return name; - } - } else { - return TransMeta.class.getName(); - } - } - /** - * Cancels queries opened for checking & fieldprediction. 
- * - * @throws KettleDatabaseException - * if any errors occur during query cancellation - */ - public void cancelQueries() throws KettleDatabaseException { - for ( int i = 0; i < nrSteps(); i++ ) { - getStep( i ).getStepMetaInterface().cancelQueries(); - } - } + boolean hasLoop = false; + + int nr = findNrPrevSteps(stepMeta, info); + for (int i = 0; i < nr && !hasLoop; i++) { + StepMeta prevStepMeta = findPrevStep(stepMeta, i, info); + if (prevStepMeta != null) { + if (prevStepMeta.equals(stepMeta)) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if (prevStepMeta.equals(lookup)) { + hasLoop = true; + break; // no need to check more but caching this one below + } else if (hasLoop(prevStepMeta, lookup == null ? stepMeta : lookup, info)) { + hasLoop = true; + break; // no need to check more but caching this one below + } + } + } - /** - * Gets the arguments (and their values) used by this transformation. If argument values are supplied by parameter, - * the values will used for the arguments. If the values are null or empty, the method will attempt to use argument - * values from a previous execution. - * - * @param arguments - * the values for the arguments - * @return A row with the used arguments (and their values) in it. - */ - public Map getUsedArguments( String[] arguments ) { - Map transArgs = new HashMap(); + // Store in the cache... + // + loopCache.put(cacheKey, Boolean.valueOf(hasLoop)); - for ( int i = 0; i < nrSteps(); i++ ) { - StepMetaInterface smi = getStep( i ).getStepMetaInterface(); - Map stepArgs = smi.getUsedArguments(); // Get the command line arguments that this step uses. - if ( stepArgs != null ) { - transArgs.putAll( stepArgs ); - } + return hasLoop; } - // OK, so perhaps, we can use the arguments from a previous execution? - String[] saved = Props.isInitialized() ? Props.getInstance().getLastArguments() : null; + /** + * Mark all steps in the transformation as selected. + */ + public void selectAll() { + int i; + for (i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + stepMeta.setSelected(true); + } + for (i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + ni.setSelected(true); + } - // Set the default values on it... - // Also change the name to "Argument 1" .. "Argument 10" - // - for ( String argument : transArgs.keySet() ) { - String value = ""; - int argNr = Const.toInt( argument, -1 ); - if ( arguments != null && argNr > 0 && argNr <= arguments.length ) { - value = Const.NVL( arguments[argNr - 1], "" ); - } - if ( value.length() == 0 ) { // try the saved option... + setChanged(); + notifyObservers("refreshGraph"); + } - if ( argNr > 0 && argNr < saved.length && saved[argNr] != null ) { - value = saved[argNr - 1]; + /** + * Clear the selection of all steps. + */ + public void unselectAll() { + int i; + for (i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + stepMeta.setSelected(false); + } + for (i = 0; i < nrNotes(); i++) { + NotePadMeta ni = getNote(i); + ni.setSelected(false); } - } - transArgs.put( argument, value ); } - return transArgs; - } - - /** - * Gets the amount of time (in nano-seconds) to wait while the input buffer is empty. - * - * @return the number of nano-seconds to wait while the input buffer is empty. - */ - public int getSleepTimeEmpty() { - return sleepTimeEmpty; - } + /** + * Get an array of all the selected step locations. + * + * @return The selected step locations. 
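For reference, the loop check above amounts to a memoized backward walk over a step's predecessors: each "step - lookup - info" combination is stored in loopCache so repeated questions are answered from the cache. The following is a minimal standalone sketch of that idea only, using plain strings for step names and ignoring the informational-step distinction; the class and method names are hypothetical and nothing below is part of the patched sources.

import java.util.*;

/** Standalone illustration only: memoized backward search for a cycle, in the spirit of hasLoop(). */
public class LoopCheckSketch {
    private final Map<String, List<String>> previousSteps = new HashMap<>(); // step -> steps feeding into it
    private final Map<String, Boolean> loopCache = new HashMap<>();

    void addHop(String from, String to) {
        previousSteps.computeIfAbsent(to, k -> new ArrayList<>()).add(from);
    }

    boolean hasLoop(String step) {
        loopCache.clear();
        return hasLoop(step, step);
    }

    private boolean hasLoop(String step, String lookup) {
        String cacheKey = step + " - " + lookup;
        Boolean cached = loopCache.get(cacheKey);
        if (cached != null) {
            return cached;
        }
        loopCache.put(cacheKey, Boolean.FALSE); // mark "in progress" so an unrelated cycle cannot recurse forever
        boolean hasLoop = false;
        for (String prev : previousSteps.getOrDefault(step, Collections.emptyList())) {
            if (prev.equals(lookup) || hasLoop(prev, lookup)) {
                hasLoop = true;
                break; // no need to check more, but still cache the answer below
            }
        }
        loopCache.put(cacheKey, hasLoop);
        return hasLoop;
    }

    public static void main(String[] args) {
        LoopCheckSketch g = new LoopCheckSketch();
        g.addHop("read", "filter");
        g.addHop("filter", "write");
        g.addHop("write", "read"); // closes a cycle
        System.out.println(g.hasLoop("filter")); // prints true
    }
}

Marking the key before recursing is a small deviation from the structure above; it only guarantees termination while the cache is still being filled.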
+ */ + public Point[] getSelectedStepLocations() { + List points = new ArrayList(); - /** - * Gets the amount of time (in nano-seconds) to wait while the input buffer is full. - * - * @return the number of nano-seconds to wait while the input buffer is full. - */ - public int getSleepTimeFull() { - return sleepTimeFull; - } + for (StepMeta stepMeta : getSelectedSteps()) { + Point p = stepMeta.getLocation(); + points.add(new Point(p.x, p.y)); // explicit copy of location + } - /** - * Sets the amount of time (in nano-seconds) to wait while the input buffer is empty. - * - * @param sleepTimeEmpty - * the number of nano-seconds to wait while the input buffer is empty. - */ - public void setSleepTimeEmpty( int sleepTimeEmpty ) { - this.sleepTimeEmpty = sleepTimeEmpty; - } + return points.toArray(new Point[points.size()]); + } - /** - * Sets the amount of time (in nano-seconds) to wait while the input buffer is full. - * - * @param sleepTimeFull - * the number of nano-seconds to wait while the input buffer is full. - */ - public void setSleepTimeFull( int sleepTimeFull ) { - this.sleepTimeFull = sleepTimeFull; - } + /** + * Get an array of all the selected note locations. + * + * @return The selected note locations. + */ + public Point[] getSelectedNoteLocations() { + List points = new ArrayList(); - /** - * This method asks all steps in the transformation whether or not the specified database connection is used. The - * connection is used in the transformation if any of the steps uses it or if it is being used to log to. - * - * @param databaseMeta - * The connection to check - * @return true if the connection is used in this transformation. - */ - public boolean isDatabaseConnectionUsed( DatabaseMeta databaseMeta ) { - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - DatabaseMeta[] dbs = stepMeta.getStepMetaInterface().getUsedDatabaseConnections(); - for ( int d = 0; d < dbs.length; d++ ) { - if ( dbs[d].equals( databaseMeta ) ) { - return true; + for (NotePadMeta ni : getSelectedNotes()) { + Point p = ni.getLocation(); + points.add(new Point(p.x, p.y)); // explicit copy of location } - } - } - if ( transLogTable.getDatabaseMeta() != null && transLogTable.getDatabaseMeta().equals( databaseMeta ) ) { - return true; + return points.toArray(new Point[points.size()]); } - return false; - } + /** + * Gets a list of the selected steps. + * + * @return A list of all the selected steps. + */ + public List getSelectedSteps() { + List selection = new ArrayList(); + for (StepMeta stepMeta : steps) { + if (stepMeta.isSelected()) { + selection.add(stepMeta); + } - /* - * public List getInputFiles() { return inputFiles; } - * - * public void setInputFiles(List inputFiles) { this.inputFiles = inputFiles; } - */ + } + return selection; + } - /** - * Gets a list of all the strings used in this transformation. The parameters indicate which collections to search and - * which to exclude. - * - * @param searchSteps - * true if steps should be searched, false otherwise - * @param searchDatabases - * true if databases should be searched, false otherwise - * @param searchNotes - * true if notes should be searched, false otherwise - * @param includePasswords - * true if passwords should be searched, false otherwise - * @return a list of search results for strings used in the transformation. 
- */ - public List getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes, - boolean includePasswords ) { - List stringList = new ArrayList(); - - if ( searchSteps ) { - // Loop over all steps in the transformation and see what the used vars are... - for ( int i = 0; i < nrSteps(); i++ ) { - StepMeta stepMeta = getStep( i ); - stringList.add( new StringSearchResult( stepMeta.getName(), stepMeta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.StepName" ) ) ); - if ( stepMeta.getDescription() != null ) { - stringList.add( new StringSearchResult( stepMeta.getDescription(), stepMeta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.StepDescription" ) ) ); - } - StepMetaInterface metaInterface = stepMeta.getStepMetaInterface(); - StringSearcher.findMetaData( metaInterface, 1, stringList, stepMeta, this ); - } - } - - // Loop over all steps in the transformation and see what the used vars are... - if ( searchDatabases ) { - for ( int i = 0; i < nrDatabases(); i++ ) { - DatabaseMeta meta = getDatabase( i ); - stringList.add( new StringSearchResult( meta.getName(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseConnectionName" ) ) ); - if ( meta.getHostname() != null ) { - stringList.add( new StringSearchResult( meta.getHostname(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseHostName" ) ) ); - } - if ( meta.getDatabaseName() != null ) { - stringList.add( new StringSearchResult( meta.getDatabaseName(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseName" ) ) ); - } - if ( meta.getUsername() != null ) { - stringList.add( new StringSearchResult( meta.getUsername(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseUsername" ) ) ); - } - if ( meta.getPluginId() != null ) { - stringList.add( new StringSearchResult( meta.getPluginId(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseTypeDescription" ) ) ); - } - if ( meta.getDatabasePortNumberString() != null ) { - stringList.add( new StringSearchResult( meta.getDatabasePortNumberString(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabasePort" ) ) ); - } - if ( meta.getServername() != null ) { - stringList.add( new StringSearchResult( meta.getServername(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabaseServer" ) ) ); - } - if ( includePasswords ) { - if ( meta.getPassword() != null ) { - stringList.add( new StringSearchResult( meta.getPassword(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.DatabasePassword" ) ) ); - } - } - } - } - - // Loop over all steps in the transformation and see what the used vars are... - if ( searchNotes ) { - for ( int i = 0; i < nrNotes(); i++ ) { - NotePadMeta meta = getNote( i ); - if ( meta.getNote() != null ) { - stringList.add( new StringSearchResult( meta.getNote(), meta, this, - BaseMessages.getString( PKG, "TransMeta.SearchMetadata.NotepadText" ) ) ); - } - } - } - - return stringList; - } - - /** - * Get a list of all the strings used in this transformation. The parameters indicate which collections to search and - * which to exclude. 
- * - * @param searchSteps - * true if steps should be searched, false otherwise - * @param searchDatabases - * true if databases should be searched, false otherwise - * @param searchNotes - * true if notes should be searched, false otherwise - * @return a list of search results for strings used in the transformation. - */ - public List getStringList( boolean searchSteps, boolean searchDatabases, boolean searchNotes ) { - return getStringList( searchSteps, searchDatabases, searchNotes, false ); - } + /** + * Gets an array of all the selected step names. + * + * @return An array of all the selected step names. + */ + public String[] getSelectedStepNames() { + List selection = getSelectedSteps(); + String[] retval = new String[selection.size()]; + for (int i = 0; i < retval.length; i++) { + StepMeta stepMeta = selection.get(i); + retval[i] = stepMeta.getName(); + } + return retval; + } - /** - * Gets a list of the used variables in this transformation. - * - * @return a list of the used variables in this transformation. - */ - public List getUsedVariables() { - // Get the list of Strings. - List stringList = getStringList( true, true, false, true ); + /** + * Gets an array of the locations of an array of steps. + * + * @param steps An array of steps + * @return an array of the locations of an array of steps + */ + public int[] getStepIndexes(List steps) { + int[] retval = new int[steps.size()]; - List varList = new ArrayList(); + for (int i = 0; i < steps.size(); i++) { + retval[i] = indexOfStep(steps.get(i)); + } - // Look around in the strings, see what we find... - for ( int i = 0; i < stringList.size(); i++ ) { - StringSearchResult result = stringList.get( i ); - StringUtil.getUsedVariables( result.getString(), varList, false ); + return retval; } - return varList; - } + /** + * Gets the maximum size of the canvas by calculating the maximum location of a step. + * + * @return Maximum coordinate of a step in the transformation + (100,100) for safety. + */ + public Point getMaximum() { + int maxx = 0, maxy = 0; + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + Point loc = stepMeta.getLocation(); + if (loc.x > maxx) { + maxx = loc.x; + } + if (loc.y > maxy) { + maxy = loc.y; + } + } + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta notePadMeta = getNote(i); + Point loc = notePadMeta.getLocation(); + if (loc.x + notePadMeta.width > maxx) { + maxx = loc.x + notePadMeta.width; + } + if (loc.y + notePadMeta.height > maxy) { + maxy = loc.y + notePadMeta.height; + } + } - /** - * Gets the previous result. - * - * @return the previous Result. - * @deprecated this was moved to Trans to keep the metadata stateless - */ - @Deprecated - public Result getPreviousResult() { - return previousResult; - } + return new Point(maxx + 100, maxy + 100); + } - /** - * Sets the previous result. - * - * @param previousResult - * The previous Result to set. - * @deprecated this was moved to Trans to keep the metadata stateless - */ - @Deprecated - public void setPreviousResult( Result previousResult ) { - this.previousResult = previousResult; - } + /** + * Gets the minimum point on the canvas of a transformation. 
+ * + * @return Minimum coordinate of a step in the transformation + */ + public Point getMinimum() { + int minx = Integer.MAX_VALUE, miny = Integer.MAX_VALUE; + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + Point loc = stepMeta.getLocation(); + if (loc.x < minx) { + minx = loc.x; + } + if (loc.y < miny) { + miny = loc.y; + } + } + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta notePadMeta = getNote(i); + Point loc = notePadMeta.getLocation(); + if (loc.x < minx) { + minx = loc.x; + } + if (loc.y < miny) { + miny = loc.y; + } + } - /** - * Gets a list of the files in the result. - * - * @return a list of ResultFiles. - * - * @deprecated this was moved to Trans to keep the metadata stateless - */ - @Deprecated - public List getResultFiles() { - return resultFiles; - } + if (minx > BORDER_INDENT && minx != Integer.MAX_VALUE) { + minx -= BORDER_INDENT; + } else { + minx = 0; + } + if (miny > BORDER_INDENT && miny != Integer.MAX_VALUE) { + miny -= BORDER_INDENT; + } else { + miny = 0; + } - /** - * Sets the list of the files in the result. - * - * @param resultFiles - * The list of ResultFiles to set. - * @deprecated this was moved to Trans to keep the metadata stateless - */ - @Deprecated - public void setResultFiles( List resultFiles ) { - this.resultFiles = resultFiles; - } + return new Point(minx, miny); + } - /** - * Gets a list of partition schemas for this transformation. - * - * @return a list of PartitionSchemas - */ - public List getPartitionSchemas() { - return partitionSchemas; - } + /** + * Gets the names of all the steps. + * + * @return An array of step names. + */ + public String[] getStepNames() { + String[] retval = new String[nrSteps()]; - /** - * Sets the list of partition schemas for this transformation. - * - * @param partitionSchemas - * the list of PartitionSchemas to set - */ - public void setPartitionSchemas( List partitionSchemas ) { - this.partitionSchemas = partitionSchemas; - } + for (int i = 0; i < nrSteps(); i++) { + retval[i] = getStep(i).getName(); + } - /** - * Gets the partition schemas' names. - * - * @return a String array containing the available partition schema names. - */ - public String[] getPartitionSchemasNames() { - String[] names = new String[partitionSchemas.size()]; - for ( int i = 0; i < names.length; i++ ) { - names[i] = partitionSchemas.get( i ).getName(); + return retval; } - return names; - } - /** - * Checks if is feedback shown. - * - * @return true if feedback is shown, false otherwise - */ - public boolean isFeedbackShown() { - return feedbackShown; - } + /** + * Gets all the steps as an array. + * + * @return An array of all the steps in the transformation. + */ + public StepMeta[] getStepsArray() { + StepMeta[] retval = new StepMeta[nrSteps()]; - /** - * Sets whether the feedback should be shown. - * - * @param feedbackShown - * true if feedback should be shown, false otherwise - */ - public void setFeedbackShown( boolean feedbackShown ) { - this.feedbackShown = feedbackShown; - } + for (int i = 0; i < nrSteps(); i++) { + retval[i] = getStep(i); + } - /** - * Gets the feedback size. - * - * @return the feedback size - */ - public int getFeedbackSize() { - return feedbackSize; - } + return retval; + } - /** - * Sets the feedback size. - * - * @param feedbackSize - * the feedback size to set - */ - public void setFeedbackSize( int feedbackSize ) { - this.feedbackSize = feedbackSize; - } + /** + * Looks in the transformation to find a step in a previous location starting somewhere. 
+ * + * @param startStep The starting step + * @param stepToFind The step to look for backward in the transformation + * @return true if we can find the step in an earlier location in the transformation. + */ + public boolean findPrevious(StepMeta startStep, StepMeta stepToFind) { + String key = startStep.getName() + " - " + stepToFind.getName(); + Boolean result = loopCache.get(key); + if (result != null) { + return result; + } - /** - * Checks if the transformation is using unique database connections. - * - * @return true if the transformation is using unique database connections, false otherwise - */ - public boolean isUsingUniqueConnections() { - return usingUniqueConnections; - } + // Normal steps + // + List previousSteps = findPreviousSteps(startStep, false); + for (int i = 0; i < previousSteps.size(); i++) { + StepMeta stepMeta = previousSteps.get(i); + if (stepMeta.equals(stepToFind)) { + loopCache.put(key, true); + return true; + } - /** - * Sets whether the transformation is using unique database connections. - * - * @param usingUniqueConnections - * true if the transformation is using unique database connections, false otherwise - */ - public void setUsingUniqueConnections( boolean usingUniqueConnections ) { - this.usingUniqueConnections = usingUniqueConnections; - } + boolean found = findPrevious(stepMeta, stepToFind); // Look further back in the tree. + if (found) { + loopCache.put(key, true); + return true; + } + } - /** - * Gets a list of the cluster schemas used by the transformation. - * - * @return a list of ClusterSchemas - */ - public List getClusterSchemas() { - return clusterSchemas; - } + // Info steps + List infoSteps = findPreviousSteps(startStep, true); + for (int i = 0; i < infoSteps.size(); i++) { + StepMeta stepMeta = infoSteps.get(i); + if (stepMeta.equals(stepToFind)) { + loopCache.put(key, true); + return true; + } - /** - * Sets list of the cluster schemas used by the transformation. - * - * @param clusterSchemas - * the list of ClusterSchemas to set - */ - public void setClusterSchemas( List clusterSchemas ) { - this.clusterSchemas = clusterSchemas; - } + boolean found = findPrevious(stepMeta, stepToFind); // Look further back in the tree. + if (found) { + loopCache.put(key, true); + return true; + } + } - /** - * Gets the cluster schema names. - * - * @return a String array containing the cluster schemas' names - */ - public String[] getClusterSchemaNames() { - String[] names = new String[clusterSchemas.size()]; - for ( int i = 0; i < names.length; i++ ) { - names[i] = clusterSchemas.get( i ).getName(); + loopCache.put(key, false); + return false; } - return names; - } - - /** - * Find a partition schema using its name. - * - * @param name - * The name of the partition schema to look for. - * @return the partition with the specified name of null if nothing was found - */ - public PartitionSchema findPartitionSchema( String name ) { - for ( int i = 0; i < partitionSchemas.size(); i++ ) { - PartitionSchema schema = partitionSchemas.get( i ); - if ( schema.getName().equalsIgnoreCase( name ) ) { - return schema; - } - } - return null; - } - - /** - * Find a clustering schema using its name. - * - * @param name - * The name of the clustering schema to look for. 
- * @return the cluster schema with the specified name of null if nothing was found - */ - public ClusterSchema findClusterSchema( String name ) { - for ( int i = 0; i < clusterSchemas.size(); i++ ) { - ClusterSchema schema = clusterSchemas.get( i ); - if ( schema.getName().equalsIgnoreCase( name ) ) { - return schema; - } - } - return null; - } - - /** - * Add a new partition schema to the transformation if that didn't exist yet. Otherwise, replace it. - * - * @param partitionSchema - * The partition schema to be added. - */ - public void addOrReplacePartitionSchema( PartitionSchema partitionSchema ) { - int index = partitionSchemas.indexOf( partitionSchema ); - if ( index < 0 ) { - partitionSchemas.add( partitionSchema ); - } else { - PartitionSchema previous = partitionSchemas.get( index ); - previous.replaceMeta( partitionSchema ); - } - setChanged(); - } - - /** - * Add a new cluster schema to the transformation if that didn't exist yet. Otherwise, replace it. - * - * @param clusterSchema - * The cluster schema to be added. - */ - public void addOrReplaceClusterSchema( ClusterSchema clusterSchema ) { - int index = clusterSchemas.indexOf( clusterSchema ); - if ( index < 0 ) { - clusterSchemas.add( clusterSchema ); - } else { - ClusterSchema previous = clusterSchemas.get( index ); - previous.replaceMeta( clusterSchema ); - } - setChanged(); - } - - /** - * Save shared objects, including databases, steps, partition schemas, slave servers, and cluster schemas, to a file - * - * @throws KettleException - * the kettle exception - * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() - * @see org.pentaho.di.shared.SharedObjects#saveToFile() - */ - public void saveSharedObjects() throws KettleException { - try { - // Save the meta store shared objects... - // - saveMetaStoreObjects( repository, metaStore ); - - // Load all the shared objects... - String soFile = environmentSubstitute( sharedObjectsFile ); - SharedObjects sharedObjects = new SharedObjects( soFile ); - - // Now overwrite the objects in there - List shared = new ArrayList(); - shared.addAll( databases ); - shared.addAll( steps ); - shared.addAll( partitionSchemas ); - shared.addAll( slaveServers ); - shared.addAll( clusterSchemas ); - - // The databases connections... - for ( SharedObjectInterface sharedObject : shared ) { - if ( sharedObject.isShared() ) { - sharedObjects.storeObject( sharedObject ); - } - } - - // Save the objects - sharedObjects.saveToFile(); - } catch ( Exception e ) { - throw new KettleException( "Unable to save shared ojects", e ); - } - } - - /** - * Checks whether the transformation is using thread priority management. - * - * @return true if the transformation is using thread priority management, false otherwise - */ - public boolean isUsingThreadPriorityManagment() { - return usingThreadPriorityManagment; - } - - /** - * Sets whether the transformation is using thread priority management. - * - * @param usingThreadPriorityManagment - * true if the transformation is using thread priority management, false otherwise - */ - public void setUsingThreadPriorityManagment( boolean usingThreadPriorityManagment ) { - this.usingThreadPriorityManagment = usingThreadPriorityManagment; - } - /** - * Check a step to see if there are no multiple steps to read from. If so, check to see if the receiving rows are all - * the same in layout. We only want to ONLY use the DBCache for this to prevent GUI stalls. 
- * - * @param stepMeta - * the step to check - * @param monitor - * the monitor - * @throws KettleRowException - * in case we detect a row mixing violation - */ - public void checkRowMixingStatically( StepMeta stepMeta, ProgressMonitorListener monitor ) throws KettleRowException { - int nrPrevious = findNrPrevSteps( stepMeta ); - if ( nrPrevious > 1 ) { - RowMetaInterface referenceRow = null; - // See if all previous steps send out the same rows... - for ( int i = 0; i < nrPrevious; i++ ) { - StepMeta previousStep = findPrevStep( stepMeta, i ); + /** + * Puts the steps in alphabetical order. + */ + public void sortSteps() { try { - RowMetaInterface row = getStepFields( previousStep, monitor ); // Throws KettleStepException - if ( referenceRow == null ) { - referenceRow = row; - } else if ( !stepMeta.getStepMetaInterface().excludeFromRowLayoutVerification() ) { - BaseStep.safeModeChecking( referenceRow, row ); - } - } catch ( KettleStepException e ) { - // We ignore this one because we are in the process of designing the transformation, anything intermediate can - // go wrong. - } - } - } - } - - /** - * Sets the internal kettle variables. - * - * @param var - * the new internal kettle variables - */ - public void setInternalKettleVariables( VariableSpace var ) { - setInternalFilenameKettleVariables( var ); - setInternalNameKettleVariable( var ); - - // The name of the directory in the repository - // - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, - directory != null ? directory.getPath() : "" ); - - boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null; - - if ( hasRepoDir ) { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, - variables.getVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY ) ); - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, - variables.getVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); + Collections.sort(steps); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "TransMeta.Exception.ErrorOfSortingSteps") + e); + log.logError(Const.getStackTracker(e)); + } } - // Here we don't remove the job specific parameters, as they may come in handy. - // - if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY ) == null ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "Parent Job File Directory" ); - } - if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME ) == null ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "Parent Job Filename" ); - } - if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_NAME ) == null ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_NAME, "Parent Job Name" ); - } - if ( variables.getVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY ) == null ) { - variables.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "Parent Job Repository Directory" ); + /** + * Sorts all the hops in the transformation. + */ + public void sortHops() { + Collections.sort(hops); } - variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, - variables.getVariable( repository != null ? Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY - : Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ); - } - - /** - * Sets the internal name kettle variable. 
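The static row-mixing check above gathers the output row layout of every step feeding the step under inspection and compares each against the first layout found, so differently-shaped inputs are flagged at design time rather than at run time. Below is a small self-contained sketch of that comparison with a deliberately simplified field model (ordered "name:type" strings); it stands in for, but is not, BaseStep.safeModeChecking, and all names are hypothetical.

import java.util.*;

/** Standalone illustration only: flag inputs whose row layout differs from the first input. */
public class RowMixingCheckSketch {

    /** A row layout reduced to an ordered list of "name:type" entries. */
    static List<String> layout(String... fields) {
        return Arrays.asList(fields);
    }

    /** Returns a description of the problem, or null when all inputs share the reference layout. */
    static String checkRowMixing(List<List<String>> inputLayouts) {
        List<String> reference = null;
        for (List<String> layout : inputLayouts) {
            if (reference == null) {
                reference = layout; // the first input defines the expected layout
            } else if (!reference.equals(layout)) {
                return "Row layout mismatch: expected " + reference + " but got " + layout;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        List<List<String>> ok = Arrays.asList(
            layout("id:Integer", "name:String"),
            layout("id:Integer", "name:String"));
        List<List<String>> mixed = Arrays.asList(
            layout("id:Integer", "name:String"),
            layout("id:Integer", "name:String", "price:Number"));
        System.out.println(checkRowMixing(ok));    // null
        System.out.println(checkRowMixing(mixed)); // mismatch message
    }
}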
- * - * @param var - * the new internal name kettle variable - */ - protected void setInternalNameKettleVariable( VariableSpace var ) { - // The name of the transformation - // - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( name, "" ) ); - } - - /** - * Sets the internal filename kettle variables. - * - * @param var - * the new internal filename kettle variables - */ - protected void setInternalFilenameKettleVariables( VariableSpace var ) { - // If we have a filename that's defined, set variables. If not, clear them. - // - if ( !Const.isEmpty( filename ) ) { - try { - FileObject fileObject = KettleVFS.getFileObject( filename, var ); - FileName fileName = fileObject.getName(); - - // The filename of the transformation - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() ); - - // The directory of the transformation - FileName fileDir = fileName.getParent(); - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() ); - } catch ( KettleFileException e ) { - log.logError( "Unexpected error setting internal filename variables!", e ); - - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); - } - } else { - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" ); - variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" ); - } - - } - - /** - * Finds the mapping input step with the specified name. If no mapping input step is found, null is returned - * - * @param stepname - * the name to search for - * @return the step meta-data corresponding to the desired mapping input step, or null if no step was found - * @throws KettleStepException - * if any errors occur during the search - */ - public StepMeta findMappingInputStep( String stepname ) throws KettleStepException { - if ( !Const.isEmpty( stepname ) ) { - StepMeta stepMeta = findStep( stepname ); // TODO verify that it's a mapping input!! - if ( stepMeta == null ) { - throw new KettleStepException( BaseMessages.getString( - PKG, "TransMeta.Exception.StepNameNotFound", stepname ) ); - } - return stepMeta; - } else { - // Find the first mapping input step that fits the bill. - StepMeta stepMeta = null; - for ( StepMeta mappingStep : steps ) { - if ( mappingStep.getStepID().equals( "MappingInput" ) ) { - if ( stepMeta == null ) { - stepMeta = mappingStep; - } else if ( stepMeta != null ) { - throw new KettleStepException( BaseMessages.getString( - PKG, "TransMeta.Exception.OnlyOneMappingInputStepAllowed", "2" ) ); - } - } - } - if ( stepMeta == null ) { - throw new KettleStepException( BaseMessages.getString( - PKG, "TransMeta.Exception.OneMappingInputStepRequired" ) ); - } - return stepMeta; - } - } - - /** - * Finds the mapping output step with the specified name. If no mapping output step is found, null is returned. - * - * @param stepname - * the name to search for - * @return the step meta-data corresponding to the desired mapping input step, or null if no step was found - * @throws KettleStepException - * if any errors occur during the search - */ - public StepMeta findMappingOutputStep( String stepname ) throws KettleStepException { - if ( !Const.isEmpty( stepname ) ) { - StepMeta stepMeta = findStep( stepname ); // TODO verify that it's a mapping output step. 
- if ( stepMeta == null ) { - throw new KettleStepException( BaseMessages.getString( - PKG, "TransMeta.Exception.StepNameNotFound", stepname ) ); - } - return stepMeta; - } else { - // Find the first mapping output step that fits the bill. - StepMeta stepMeta = null; - for ( StepMeta mappingStep : steps ) { - if ( mappingStep.getStepID().equals( "MappingOutput" ) ) { - if ( stepMeta == null ) { - stepMeta = mappingStep; - } else if ( stepMeta != null ) { - throw new KettleStepException( BaseMessages.getString( - PKG, "TransMeta.Exception.OnlyOneMappingOutputStepAllowed", "2" ) ); - } - } - } - if ( stepMeta == null ) { - throw new KettleStepException( BaseMessages.getString( - PKG, "TransMeta.Exception.OneMappingOutputStepRequired" ) ); - } - return stepMeta; - } - } - - /** - * Gets a list of the resource dependencies. - * - * @return a list of ResourceReferences - */ - public List getResourceDependencies() { - List resourceReferences = new ArrayList(); + /** + * The previous count. + */ + private long prevCount; - for ( StepMeta stepMeta : steps ) { - resourceReferences.addAll( stepMeta.getResourceDependencies( this ) ); - } + /** + * Puts the steps in a more natural order: from start to finish. For the moment, we ignore splits and joins. Splits + * and joins can't be listed sequentially in any case! + * + * @return a map containing all the previous steps per step + */ + public Map> sortStepsNatural() { + long startTime = System.currentTimeMillis(); - return resourceReferences; - } + prevCount = 0; - /** - * Exports the specified objects to a flat-file system, adding content with filename keys to a set of definitions. The - * supplied resource naming interface allows the object to name appropriately without worrying about those parts of - * the implementation specific details. - * - * @param space - * the variable space to use - * @param definitions - * @param resourceNamingInterface - * @param repository - * The repository to optionally load other resources from (to be converted to XML) - * @param metaStore - * the metaStore in which non-kettle metadata could reside. - * - * @return the filename of the exported resource - */ - public String exportResources( VariableSpace space, Map definitions, - ResourceNamingInterface resourceNamingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { - - try { - // Handle naming for both repository and XML bases resources... - // - String baseName; - String originalPath; - String fullname; - String extension = "ktr"; - if ( Const.isEmpty( getFilename() ) ) { - // Assume repository... + // First create a map where all the previous steps of another step are kept... // - originalPath = directory.getPath(); - baseName = getName(); - fullname = - directory.getPath() - + ( directory.getPath().endsWith( RepositoryDirectory.DIRECTORY_SEPARATOR ) - ? "" : RepositoryDirectory.DIRECTORY_SEPARATOR ) + getName() + "." 
+ extension; // - } else { - // Assume file + final Map> stepMap = new HashMap>(); + + // Also cache the previous steps // - FileObject fileObject = KettleVFS.getFileObject( space.environmentSubstitute( getFilename() ), space ); - originalPath = fileObject.getParent().getURL().toString(); - baseName = fileObject.getName().getBaseName(); - fullname = fileObject.getURL().toString(); - } - - String - exportFileName = - resourceNamingInterface - .nameResource( baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.TRANSFORMATION ); - ResourceDefinition definition = definitions.get( exportFileName ); - if ( definition == null ) { - // If we do this once, it will be plenty :-) + final Map> previousCache = new HashMap>(); + + // Cache calculation of steps before another // - TransMeta transMeta = (TransMeta) this.realClone( false ); - // transMeta.copyVariablesFrom(space); + Map> beforeCache = new HashMap>(); + + for (StepMeta stepMeta : steps) { + // What are the previous steps? (cached version for performance) + // + List prevSteps = previousCache.get(stepMeta); + if (prevSteps == null) { + prevSteps = findPreviousSteps(stepMeta); + prevCount++; + previousCache.put(stepMeta, prevSteps); + } - // Add used resources, modify transMeta accordingly - // Go through the list of steps, etc. - // These critters change the steps in the cloned TransMeta - // At the end we make a new XML version of it in "exported" - // format... + // Now get the previous steps recursively, store them in the step map + // + for (StepMeta prev : prevSteps) { + Map beforePrevMap = updateFillStepMap(previousCache, beforeCache, stepMeta, prev); + stepMap.put(stepMeta, beforePrevMap); - // loop over steps, databases will be exported to XML anyway. - // - for ( StepMeta stepMeta : transMeta.getSteps() ) { - stepMeta.exportResources( space, definitions, resourceNamingInterface, repository, metaStore ); + // Store it also in the beforeCache... + // + beforeCache.put(prev, beforePrevMap); + } } - // Change the filename, calling this sets internal variables - // inside of the transformation. + Collections.sort(steps, new Comparator() { + + public int compare(StepMeta o1, StepMeta o2) { + + Map beforeMap = stepMap.get(o1); + if (beforeMap != null) { + if (beforeMap.get(o2) == null) { + return -1; + } else { + return 1; + } + } else { + return o1.getName().compareToIgnoreCase(o2.getName()); + } + } + }); + + long endTime = System.currentTimeMillis(); + log.logBasic( + BaseMessages.getString(PKG, "TransMeta.Log.TimeExecutionStepSort", (endTime - startTime), prevCount)); + + return stepMap; + } + + /** + * Fills a map with all steps previous to the given step. This method uses a caching technique, so if a map is + * provided that contains the specified previous step, it is immediately returned to avoid unnecessary processing. + * Otherwise, the previous steps are determined and added to the map recursively, and a cache is constructed for later + * use. 
+ * + * @param previousCache the previous cache, must be non-null + * @param beforeCache the before cache, must be non-null + * @param originStepMeta the origin step meta + * @param previousStepMeta the previous step meta + * @return the map + */ + private Map updateFillStepMap(Map> previousCache, + Map> beforeCache, StepMeta originStepMeta, StepMeta previousStepMeta) { + + // See if we have a hash map to store step occurrence (located before the step) // - transMeta.setFilename( exportFileName ); + Map beforeMap = beforeCache.get(previousStepMeta); + if (beforeMap == null) { + beforeMap = new HashMap(); + } else { + return beforeMap; // Nothing left to do here! + } - // All objects get re-located to the root folder + // Store the current previous step in the map // - transMeta.setRepositoryDirectory( new RepositoryDirectory() ); + beforeMap.put(previousStepMeta, Boolean.TRUE); - // Set a number of parameters for all the data files referenced so far... + // Figure out all the previous steps as well, they all need to go in there... // - Map directoryMap = resourceNamingInterface.getDirectoryMap(); - if ( directoryMap != null ) { - for ( String directory : directoryMap.keySet() ) { - String parameterName = directoryMap.get( directory ); - transMeta.addParameterDefinition( parameterName, directory, "Data file path discovered during export" ); - } + List prevSteps = previousCache.get(previousStepMeta); + if (prevSteps == null) { + prevSteps = findPreviousSteps(previousStepMeta); + prevCount++; + previousCache.put(previousStepMeta, prevSteps); } - // At the end, add ourselves to the map... + // Now, get the previous steps for stepMeta recursively... + // We only do this when the beforeMap is not known yet... // - String transMetaContent = transMeta.getXML(); + for (StepMeta prev : prevSteps) { + Map beforePrevMap = updateFillStepMap(previousCache, beforeCache, originStepMeta, prev); - definition = new ResourceDefinition( exportFileName, transMetaContent ); + // Keep a copy in the cache... + // + beforeCache.put(prev, beforePrevMap); - // Also remember the original filename (if any), including variables etc. - // - if ( Const.isEmpty( this.getFilename() ) ) { // Repository - definition.setOrigin( fullname ); - } else { - definition.setOrigin( this.getFilename() ); + // Also add it to the new map for this step... + // + beforeMap.putAll(beforePrevMap); } - definitions.put( fullname, definition ); - } - return exportFileName; - } catch ( FileSystemException e ) { - throw new KettleException( BaseMessages.getString( - PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename() ), e ); - } catch ( KettleFileException e ) { - throw new KettleException( BaseMessages.getString( - PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename() ), e ); + return beforeMap; } - } - /** - * Gets the slave step copy partition distribution. - * - * @return the SlaveStepCopyPartitionDistribution + /** + * Sorts the hops in a natural way: from beginning to end. + */ + public void sortHopsNatural() { + // Loop over the hops... + for (int j = 0; j < nrTransHops(); j++) { + // Buble sort: we need to do this several times... 
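sortStepsNatural and updateFillStepMap above compute, for every step, a map of all steps located before it, memoizing both the per-step predecessor lists and the completed "before" maps, and then sort the step list so upstream steps come first. The sketch below shows the same start-to-finish ordering on a plain hop graph, assuming an acyclic graph and using hypothetical names: ancestor sets are built by memoized recursion, and because an upstream step always has strictly fewer ancestors than its descendants, sorting by ancestor-set size yields a valid order.

import java.util.*;

/** Standalone illustration only: order steps so that everything upstream of a step comes before it. */
public class NaturalOrderSketch {
    private final Map<String, List<String>> previousSteps = new HashMap<>();
    private final Map<String, Set<String>> beforeCache = new HashMap<>();

    void addHop(String from, String to) {
        previousSteps.computeIfAbsent(to, k -> new ArrayList<>()).add(from);
    }

    /** All steps located before the given step (memoized, assumes the hop graph is acyclic). */
    Set<String> stepsBefore(String step) {
        Set<String> cached = beforeCache.get(step);
        if (cached != null) {
            return cached; // nothing left to do here
        }
        Set<String> before = new HashSet<>();
        for (String prev : previousSteps.getOrDefault(step, Collections.emptyList())) {
            before.add(prev);
            before.addAll(stepsBefore(prev));
        }
        beforeCache.put(step, before);
        return before;
    }

    List<String> sortNatural(List<String> steps) {
        List<String> sorted = new ArrayList<>(steps);
        // On an acyclic graph, an upstream step has a strictly smaller ancestor set than its descendants.
        sorted.sort(Comparator.comparingInt(s -> stepsBefore(s).size()));
        return sorted;
    }

    public static void main(String[] args) {
        NaturalOrderSketch g = new NaturalOrderSketch();
        g.addHop("read", "filter");
        g.addHop("filter", "write");
        System.out.println(g.sortNatural(Arrays.asList("write", "read", "filter"))); // [read, filter, write]
    }
}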
+ for (int i = 0; i < nrTransHops() - 1; i++) { + TransHopMeta one = getTransHop(i); + TransHopMeta two = getTransHop(i + 1); + + StepMeta a = two.getFromStep(); + StepMeta b = one.getToStep(); + + if (!findPrevious(a, b) && !a.equals(b)) { + setTransHop(i + 1, one); + setTransHop(i, two); + } + } + } + } + + /** + * Determines the impact of the different steps in a transformation on databases, tables and field. + * + * @param impact An ArrayList of DatabaseImpact objects. + * @param monitor a progress monitor listener to be updated as the transformation is analyzed + * @throws KettleStepException if any errors occur during analysis + */ + public void analyseImpact(List impact, ProgressMonitorListener monitor) throws KettleStepException { + if (monitor != null) { + monitor + .beginTask(BaseMessages.getString(PKG, "TransMeta.Monitor.DeterminingImpactTask.Title"), nrSteps()); + } + boolean stop = false; + for (int i = 0; i < nrSteps() && !stop; i++) { + if (monitor != null) { + monitor.subTask( + BaseMessages.getString(PKG, "TransMeta.Monitor.LookingAtStepTask.Title") + (i + 1) + "/" + nrSteps()); + } + StepMeta stepMeta = getStep(i); + + RowMetaInterface prev = getPrevStepFields(stepMeta); + StepMetaInterface stepint = stepMeta.getStepMetaInterface(); + RowMetaInterface inform = null; + StepMeta[] lu = getInfoStep(stepMeta); + if (lu != null) { + inform = getStepFields(lu); + } else { + inform = stepint.getTableFields(); + } + + compatibleAnalyseImpactStep(impact, stepint, this, stepMeta, prev, inform); + stepint.analyseImpact(impact, this, stepMeta, prev, null, null, inform, repository, metaStore); + + if (monitor != null) { + monitor.worked(1); + stop = monitor.isCanceled(); + } + } + + if (monitor != null) { + monitor.done(); + } + } + + @SuppressWarnings("deprecation") + private void compatibleAnalyseImpactStep(List impact, StepMetaInterface stepint, TransMeta transMeta, + StepMeta stepMeta, RowMetaInterface prev, RowMetaInterface inform) throws KettleStepException { + stepint.analyseImpact(impact, transMeta, stepMeta, prev, null, null, inform); + } + + /** + * Proposes an alternative stepname when the original already exists. + * + * @param stepname The stepname to find an alternative for + * @return The suggested alternative stepname. + */ + public String getAlternativeStepname(String stepname) { + String newname = stepname; + StepMeta stepMeta = findStep(newname); + int nr = 1; + while (stepMeta != null) { + nr++; + newname = stepname + " " + nr; + stepMeta = findStep(newname); + } + + return newname; + } + + /** + * Builds a list of all the SQL statements that this transformation needs in order to work properly. + * + * @return An ArrayList of SQLStatement objects. + * @throws KettleStepException if any errors occur during SQL statement generation + */ + public List getSQLStatements() throws KettleStepException { + return getSQLStatements(null); + } + + /** + * Builds a list of all the SQL statements that this transformation needs in order to work properly. + * + * @param monitor a progress monitor listener to be updated as the SQL statements are generated + * @return An ArrayList of SQLStatement objects. 
+ * @throws KettleStepException if any errors occur during SQL statement generation + */ + public List getSQLStatements(ProgressMonitorListener monitor) throws KettleStepException { + if (monitor != null) { + monitor.beginTask(BaseMessages.getString(PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title"), nrSteps() + 1); + } + List stats = new ArrayList(); + + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + if (monitor != null) { + monitor.subTask( + BaseMessages.getString(PKG, "TransMeta.Monitor.GettingTheSQLForStepTask.Title", "" + stepMeta)); + } + RowMetaInterface prev = getPrevStepFields(stepMeta); + SQLStatement sqlCompat = compatibleStepMetaGetSQLStatements(stepMeta.getStepMetaInterface(), stepMeta, prev); + if (sqlCompat.getSQL() != null || sqlCompat.hasError()) { + stats.add(sqlCompat); + } + SQLStatement + sql = + stepMeta.getStepMetaInterface().getSQLStatements(this, stepMeta, prev, repository, metaStore); + if (sql.getSQL() != null || sql.hasError()) { + stats.add(sql); + } + if (monitor != null) { + monitor.worked(1); + } + } + + // Also check the sql for the logtable... + // + if (monitor != null) { + monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.GettingTheSQLForTransformationTask.Title2")); + } + if (transLogTable.getDatabaseMeta() != null && (!Const.isEmpty(transLogTable.getTableName()) || !Const + .isEmpty(performanceLogTable.getTableName()))) { + try { + for (LogTableInterface logTable : new LogTableInterface[]{transLogTable, performanceLogTable, + channelLogTable, stepLogTable,}) { + if (logTable.getDatabaseMeta() != null && !Const.isEmpty(logTable.getTableName())) { + + Database db = null; + try { + db = new Database(this, transLogTable.getDatabaseMeta()); + db.shareVariablesWith(this); + db.connect(); + + RowMetaInterface fields = logTable.getLogRecord(LogStatus.START, null, null).getRowMeta(); + String + schemaTable = + logTable.getDatabaseMeta() + .getQuotedSchemaTableCombination(logTable.getSchemaName(), logTable.getTableName()); + String sql = db.getDDL(schemaTable, fields); + if (!Const.isEmpty(sql)) { + SQLStatement stat = new SQLStatement("", transLogTable.getDatabaseMeta(), sql); + stats.add(stat); + } + } catch (Exception e) { + throw new KettleDatabaseException( + "Unable to connect to logging database [" + logTable.getDatabaseMeta() + "]", e); + } finally { + if (db != null) { + db.disconnect(); + } + } + } + } + } catch (KettleDatabaseException dbe) { + SQLStatement stat = new SQLStatement("", transLogTable.getDatabaseMeta(), null); + stat.setError( + BaseMessages.getString(PKG, "TransMeta.SQLStatement.ErrorDesc.ErrorObtainingTransformationLogTableInfo") + + dbe.getMessage()); + stats.add(stat); + } + } + if (monitor != null) { + monitor.worked(1); + } + if (monitor != null) { + monitor.done(); + } + + return stats; + } + + @SuppressWarnings("deprecation") + private SQLStatement compatibleStepMetaGetSQLStatements(StepMetaInterface stepMetaInterface, StepMeta stepMeta, + RowMetaInterface prev) throws KettleStepException { + return stepMetaInterface.getSQLStatements(this, stepMeta, prev); + } + + /** + * Get the SQL statements (needed to run this transformation) as a single String. 
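For the logging tables, the code above builds the expected log-record layout, asks the database layer (db.getDDL) for the statements that would align the physical table with it, and treats an empty result as "no adjustments needed". The sketch below illustrates only that convention: it compares an expected column list with the columns that already exist and emits ALTER statements for the missing ones, returning an empty string when the table matches. It is deliberately naive (no types, quoting, or JDBC metadata), and every name in it is made up.

import java.util.*;

/** Standalone illustration only: produce the DDL needed to align a table with an expected layout. */
public class LogTableDdlSketch {

    /** Returns ALTER statements for missing columns, or "" when the table already matches. */
    static String getDDL(String tableName, List<String> expectedColumns, Set<String> existingColumns) {
        StringBuilder ddl = new StringBuilder();
        for (String column : expectedColumns) {
            if (!existingColumns.contains(column)) {
                ddl.append("ALTER TABLE ").append(tableName)
                   .append(" ADD COLUMN ").append(column).append(" VARCHAR(255);\n");
            }
        }
        return ddl.toString(); // empty string: no adjustments needed
    }

    public static void main(String[] args) {
        List<String> expected = Arrays.asList("ID_BATCH", "STATUS", "LINES_READ", "LOG_FIELD");
        Set<String> existing = new HashSet<>(Arrays.asList("ID_BATCH", "STATUS", "LINES_READ"));
        String sql = getDDL("TRANS_LOG", expected, existing);
        System.out.println(sql.isEmpty() ? "Log table layout is correct" : sql);
    }
}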
+ * + * @return the SQL statements needed to run this transformation + * @throws KettleStepException if any errors occur during SQL statement generation + */ + public String getSQLStatementsString() throws KettleStepException { + String sql = ""; + List stats = getSQLStatements(); + for (int i = 0; i < stats.size(); i++) { + SQLStatement stat = stats.get(i); + if (!stat.hasError() && stat.hasSQL()) { + sql += stat.getSQL(); + } + } + + return sql; + } + + /** + * Checks all the steps and fills a List of (CheckResult) remarks. + * + * @param remarks The remarks list to add to. + * @param only_selected true to check only the selected steps, false for all steps + * @param monitor a progress monitor listener to be updated as the SQL statements are generated + */ + @Deprecated + public void checkSteps(List remarks, boolean only_selected, ProgressMonitorListener monitor) { + checkSteps(remarks, only_selected, monitor, this, null, null); + } + + /** + * Checks all the steps and fills a List of (CheckResult) remarks. + * + * @param remarks The remarks list to add to. + * @param only_selected true to check only the selected steps, false for all steps + * @param monitor a progress monitor listener to be updated as the SQL statements are generated + */ + public void checkSteps(List remarks, boolean only_selected, ProgressMonitorListener monitor, + VariableSpace space, Repository repository, IMetaStore metaStore) { + try { + remarks.clear(); // Start with a clean slate... + + Map values = new Hashtable(); + String[] stepnames; + StepMeta[] steps; + List selectedSteps = getSelectedSteps(); + if (!only_selected || selectedSteps.isEmpty()) { + stepnames = getStepNames(); + steps = getStepsArray(); + } else { + stepnames = getSelectedStepNames(); + steps = selectedSteps.toArray(new StepMeta[selectedSteps.size()]); + } + + ExtensionPointHandler.callExtensionPoint(getLogChannel(), KettleExtensionPoint.BeforeCheckSteps.id, + new CheckStepsExtension(remarks, space, this, steps, repository, metaStore)); + + boolean stop_checking = false; + + if (monitor != null) { + monitor.beginTask(BaseMessages.getString(PKG, "TransMeta.Monitor.VerifyingThisTransformationTask.Title"), + steps.length + 2); + } + + for (int i = 0; i < steps.length && !stop_checking; i++) { + if (monitor != null) { + monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.VerifyingStepTask.Title", stepnames[i])); + } + + StepMeta stepMeta = steps[i]; + + int nrinfo = findNrInfoSteps(stepMeta); + StepMeta[] infostep = null; + if (nrinfo > 0) { + infostep = getInfoStep(stepMeta); + } + + RowMetaInterface info = null; + if (infostep != null) { + try { + info = getStepFields(infostep); + } catch (KettleStepException kse) { + info = null; + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString(PKG, + "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingStepInfoFields.Description", + "" + stepMeta, Const.CR + kse.getMessage()), stepMeta); + remarks.add(cr); + } + } + + // The previous fields from non-informative steps: + RowMetaInterface prev = null; + try { + prev = getPrevStepFields(stepMeta); + } catch (KettleStepException kse) { + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultError.ErrorOccurredGettingInputFields.Description", + "" + stepMeta, Const.CR + kse.getMessage()), stepMeta); + remarks.add(cr); + // This is a severe error: stop checking... 
+ // Otherwise we wind up checking time & time again because nothing gets put in the database + // cache, the timeout of certain databases is very long... (Oracle) + stop_checking = true; + } + + if (isStepUsedInTransHops(stepMeta)) { + // Get the input & output steps! + // Copy to arrays: + String[] input = getPrevStepNames(stepMeta); + String[] output = getNextStepNames(stepMeta); + + // Check step specific info... + ExtensionPointHandler.callExtensionPoint(getLogChannel(), KettleExtensionPoint.BeforeCheckStep.id, + new CheckStepsExtension(remarks, space, this, new StepMeta[]{stepMeta}, repository, metaStore)); + stepMeta.check(remarks, this, prev, input, output, info, space, repository, metaStore); + ExtensionPointHandler.callExtensionPoint(getLogChannel(), KettleExtensionPoint.AfterCheckStep.id, + new CheckStepsExtension(remarks, space, this, new StepMeta[]{stepMeta}, repository, metaStore)); + + // See if illegal characters etc. were used in field-names... + if (prev != null) { + for (int x = 0; x < prev.size(); x++) { + ValueMetaInterface v = prev.getValueMeta(x); + String name = v.getName(); + if (name == null) { + values.put(v, + BaseMessages.getString(PKG, "TransMeta.Value.CheckingFieldName.FieldNameIsEmpty.Description")); + } else if (name.indexOf(' ') >= 0) { + values.put(v, BaseMessages + .getString(PKG, "TransMeta.Value.CheckingFieldName.FieldNameContainsSpaces.Description")); + } else { + char[] list = + new char[]{'.', ',', '-', '/', '+', '*', '\'', '\t', '"', '|', '@', '(', ')', '{', '}', '!', + '^'}; + for (int c = 0; c < list.length; c++) { + if (name.indexOf(list[c]) >= 0) { + values.put(v, BaseMessages.getString(PKG, + "TransMeta.Value.CheckingFieldName.FieldNameContainsUnfriendlyCodes.Description", + String.valueOf(list[c]))); + } + } + } + } + + // Check if 2 steps with the same name are entering the step... + if (prev.size() > 1) { + String[] fieldNames = prev.getFieldNames(); + String[] sortedNames = Const.sortStrings(fieldNames); + + String prevName = sortedNames[0]; + for (int x = 1; x < sortedNames.length; x++) { + // Checking for doubles + if (prevName.equalsIgnoreCase(sortedNames[x])) { + // Give a warning!! + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultWarning.HaveTheSameNameField.Description", + prevName), stepMeta); + remarks.add(cr); + } else { + prevName = sortedNames[x]; + } + } + } + } else { + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultError.CannotFindPreviousFields.Description") + + stepMeta.getName(), stepMeta); + remarks.add(cr); + } + } else { + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_WARNING, + BaseMessages.getString(PKG, "TransMeta.CheckResult.TypeResultWarning.StepIsNotUsed.Description"), + stepMeta); + remarks.add(cr); + } + + // Also check for mixing rows... + try { + checkRowMixingStatically(stepMeta, null); + } catch (KettleRowException e) { + CheckResult cr = new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, e.getMessage(), stepMeta); + remarks.add(cr); + } + + if (monitor != null) { + monitor.worked(1); // progress bar... + if (monitor.isCanceled()) { + stop_checking = true; + } + } + } + + // Also, check the logging table of the transformation... 
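+            /*
+             * Editor's note -- illustrative sketch only, not part of the upstream Pentaho source.
+             * checkSteps(...) is typically driven from Spoon's verify action, but it can also be
+             * called headlessly once a TransMeta has been loaded. A minimal caller, where the
+             * repository and metaStore may be null for a purely file-based transformation:
+             *
+             *   List remarks = new ArrayList();
+             *   transMeta.checkSteps(remarks, false, null, transMeta, null, null);
+             *   for (Object o : remarks) {
+             *     CheckResultInterface remark = (CheckResultInterface) o;
+             *     if (remark.getType() == CheckResultInterface.TYPE_RESULT_ERROR) {
+             *       System.err.println(remark.getText());
+             *     }
+             *   }
+             *
+             * Passing the TransMeta itself as the VariableSpace mirrors what the deprecated
+             * three-argument overload above does.
+             */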
+ if (monitor == null || !monitor.isCanceled()) { + if (monitor != null) { + monitor.subTask(BaseMessages.getString(PKG, "TransMeta.Monitor.CheckingTheLoggingTableTask.Title")); + } + if (transLogTable.getDatabaseMeta() != null) { + Database logdb = new Database(this, transLogTable.getDatabaseMeta()); + logdb.shareVariablesWith(this); + try { + logdb.connect(); + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, + BaseMessages.getString(PKG, "TransMeta.CheckResult.TypeResultOK.ConnectingWorks.Description"), + null); + remarks.add(cr); + + if (transLogTable.getTableName() != null) { + if (logdb.checkTableExists(transLogTable.getTableName())) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultOK.LoggingTableExists.Description", + transLogTable.getTableName()), null); + remarks.add(cr); + + RowMetaInterface fields = transLogTable.getLogRecord(LogStatus.START, null, null).getRowMeta(); + String sql = logdb.getDDL(transLogTable.getTableName(), fields); + if (sql == null || sql.length() == 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, + BaseMessages.getString(PKG, "TransMeta.CheckResult.TypeResultOK.CorrectLayout.Description"), + null); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString(PKG, + "TransMeta.CheckResult.TypeResultError.LoggingTableNeedsAdjustments.Description") + Const.CR + + sql, null); + remarks.add(cr); + } + + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultError.LoggingTableDoesNotExist.Description"), + null); + remarks.add(cr); + } + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultError.LogTableNotSpecified.Description"), + null); + remarks.add(cr); + } + } catch (KettleDatabaseException dbe) { + // Ignore errors + } finally { + logdb.disconnect(); + } + } + if (monitor != null) { + monitor.worked(1); + } + + } + + if (monitor != null) { + monitor.subTask(BaseMessages + .getString(PKG, "TransMeta.Monitor.CheckingForDatabaseUnfriendlyCharactersInFieldNamesTask.Title")); + } + if (values.size() > 0) { + for (ValueMetaInterface v : values.keySet()) { + String message = values.get(v); + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages + .getString(PKG, "TransMeta.CheckResult.TypeResultWarning.Description", v.getName(), message, + v.getOrigin()), findStep(v.getOrigin())); + remarks.add(cr); + } + } else { + CheckResult + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, + BaseMessages.getString(PKG, "TransMeta.CheckResult.TypeResultOK.Description"), null); + remarks.add(cr); + } + if (monitor != null) { + monitor.worked(1); + } + ExtensionPointHandler.callExtensionPoint(getLogChannel(), KettleExtensionPoint.AfterCheckSteps.id, + new CheckStepsExtension(remarks, space, this, steps, repository, metaStore)); + } catch (Exception e) { + log.logError(Const.getStackTracker(e)); + throw new RuntimeException(e); + } + + } + + /** + * Gets the result rows. + * + * @return a list containing the result rows. + * @deprecated Moved to Trans to make this class stateless + */ + @Deprecated + public List getResultRows() { + return resultRows; + } + + /** + * Sets the list of result rows. + * + * @param resultRows The list of result rows to set. 
+ * @deprecated Moved to Trans to make this class stateless + */ + @Deprecated + public void setResultRows(List resultRows) { + this.resultRows = resultRows; + } + + /** + * Gets the repository directory path and name of the transformation. + * + * @return The repository directory path plus the name of the transformation + */ + public String getPathAndName() { + if (getRepositoryDirectory().isRoot()) { + return getRepositoryDirectory().getPath() + getName(); + } else { + return getRepositoryDirectory().getPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + getName(); + } + } + + /** + * Gets the arguments used for this transformation. + * + * @return an array of String arguments for the transformation + * @deprecated moved to Trans + */ + @Deprecated + public String[] getArguments() { + return arguments; + } + + /** + * Sets the arguments used for this transformation. + * + * @param arguments The arguments to set. + * @deprecated moved to Trans + */ + @Deprecated + public void setArguments(String[] arguments) { + this.arguments = arguments; + } + + /** + * Gets the counters (database sequence values, e.g.) for the transformation. + * + * @return a named table of counters. + * @deprecated moved to Trans + */ + @Deprecated + public Hashtable getCounters() { + return counters; + } + + /** + * Sets the counters (database sequence values, e.g.) for the transformation. + * + * @param counters The counters to set. + * @deprecated moved to Trans + */ + @Deprecated + public void setCounters(Hashtable counters) { + this.counters = counters; + } + + /** + * Gets a list of dependencies for the transformation + * + * @return a list of the dependencies for the transformation + */ + public List getDependencies() { + return dependencies; + } + + /** + * Sets the dependencies for the transformation. + * + * @param dependencies The dependency list to set. + */ + public void setDependencies(List dependencies) { + this.dependencies = dependencies; + } + + /** + * Gets the database connection associated with "max date" processing. The connection, along with a specified table + * and field, allows for the filtering of the number of rows to process in a transformation by time, such as only + * processing the rows/records since the last time the transformation ran correctly. This can be used for auditing and + * throttling data during warehousing operations. + * + * @return Returns the meta-data associated with the most recent database connection. + */ + public DatabaseMeta getMaxDateConnection() { + return maxDateConnection; + } + + /** + * Sets the database connection associated with "max date" processing. + * + * @param maxDateConnection the database meta-data to set + * @see #getMaxDateConnection() + */ + public void setMaxDateConnection(DatabaseMeta maxDateConnection) { + this.maxDateConnection = maxDateConnection; + } + + /** + * Gets the maximum date difference between start and end dates for row/record processing. This can be used for + * auditing and throttling data during warehousing operations. + * + * @return the maximum date difference + */ + public double getMaxDateDifference() { + return maxDateDifference; + } + + /** + * Sets the maximum date difference between start and end dates for row/record processing. + * + * @param maxDateDifference The date difference to set. + * @see #getMaxDateDifference() + */ + public void setMaxDateDifference(double maxDateDifference) { + this.maxDateDifference = maxDateDifference; + } + + /** + * Gets the date field associated with "max date" processing. 
This allows for the filtering of the number of rows to + * process in a transformation by time, such as only processing the rows/records since the last time the + * transformation ran correctly. This can be used for auditing and throttling data during warehousing operations. + * + * @return a string representing the date for the most recent database connection. + * @see #getMaxDateConnection() + */ + public String getMaxDateField() { + return maxDateField; + } + + /** + * Sets the date field associated with "max date" processing. + * + * @param maxDateField The date field to set. + * @see #getMaxDateField() + */ + public void setMaxDateField(String maxDateField) { + this.maxDateField = maxDateField; + } + + /** + * Gets the amount by which to increase the "max date" difference. This is used in "max date" processing, and can be + * used to provide more fine-grained control of the date range. For example, if the end date specifies a minute for + * which the data is not complete, you can "roll-back" the end date by one minute by + * + * @return Returns the maxDateOffset. + * @see #setMaxDateOffset(double) + */ + public double getMaxDateOffset() { + return maxDateOffset; + } + + /** + * Sets the amount by which to increase the end date in "max date" processing. This can be used to provide more + * fine-grained control of the date range. For example, if the end date specifies a minute for which the data is not + * complete, you can "roll-back" the end date by one minute by setting the offset to -60. + * + * @param maxDateOffset The maxDateOffset to set. + */ + public void setMaxDateOffset(double maxDateOffset) { + this.maxDateOffset = maxDateOffset; + } + + /** + * Gets the database table providing a date to be used in "max date" processing. This allows for the filtering of the + * number of rows to process in a transformation by time, such as only processing the rows/records since the last time + * the transformation ran correctly. + * + * @return Returns the maxDateTable. + * @see #getMaxDateConnection() + */ + public String getMaxDateTable() { + return maxDateTable; + } + + /** + * Sets the table name associated with "max date" processing. + * + * @param maxDateTable The maxDateTable to set. + * @see #getMaxDateTable() + */ + public void setMaxDateTable(String maxDateTable) { + this.maxDateTable = maxDateTable; + } + + /** + * Gets the size of the rowsets. + * + * @return Returns the size of the rowsets. + */ + public int getSizeRowset() { + String rowSetSize = getVariable(Const.KETTLE_TRANS_ROWSET_SIZE); + int altSize = Const.toInt(rowSetSize, 0); + if (altSize > 0) { + return altSize; + } else { + return sizeRowset; + } + } + + /** + * Sets the size of the rowsets. This method allows you to change the size of the buffers between the connected steps + * in a transformation. NOTE: Do not change this parameter unless you are running low on memory, for example. + * + * @param sizeRowset The sizeRowset to set. + */ + public void setSizeRowset(int sizeRowset) { + this.sizeRowset = sizeRowset; + } + + /** + * Gets the database cache object. + * + * @return the database cache object. + */ + public DBCache getDbCache() { + return dbCache; + } + + /** + * Sets the database cache object. + * + * @param dbCache the database cache object to set + */ + public void setDbCache(DBCache dbCache) { + this.dbCache = dbCache; + } + + /** + * Gets the version of the transformation. 
+ * + * @return The version of the transformation + */ + public String getTransversion() { + return trans_version; + } + + /** + * Sets the version of the transformation. + * + * @param n The new version description of the transformation + */ + public void setTransversion(String n) { + trans_version = n; + } + + /** + * Sets the status of the transformation. + * + * @param n The new status description of the transformation + */ + public void setTransstatus(int n) { + trans_status = n; + } + + /** + * Gets the status of the transformation. + * + * @return The status of the transformation + */ + public int getTransstatus() { + return trans_status; + } + + /** + * Gets a textual representation of the transformation. If its name has been set, it will be returned, otherwise the + * classname is returned. + * + * @return the textual representation of the transformation. + */ + @Override + public String toString() { + if (!Const.isEmpty(filename)) { + if (Const.isEmpty(name)) { + return filename; + } else { + return filename + " : " + name; + } + } + + if (name != null) { + if (directory != null) { + String path = directory.getPath(); + if (path.endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR)) { + return path + name; + } else { + return path + RepositoryDirectory.DIRECTORY_SEPARATOR + name; + } + } else { + return name; + } + } else { + return TransMeta.class.getName(); + } + } + + /** + * Cancels queries opened for checking & fieldprediction. + * + * @throws KettleDatabaseException if any errors occur during query cancellation + */ + public void cancelQueries() throws KettleDatabaseException { + for (int i = 0; i < nrSteps(); i++) { + getStep(i).getStepMetaInterface().cancelQueries(); + } + } + + /** + * Gets the arguments (and their values) used by this transformation. If argument values are supplied by parameter, + * the values will used for the arguments. If the values are null or empty, the method will attempt to use argument + * values from a previous execution. + * + * @param arguments the values for the arguments + * @return A row with the used arguments (and their values) in it. + */ + public Map getUsedArguments(String[] arguments) { + Map transArgs = new HashMap(); + + for (int i = 0; i < nrSteps(); i++) { + StepMetaInterface smi = getStep(i).getStepMetaInterface(); + Map stepArgs = smi.getUsedArguments(); // Get the command line arguments that this step uses. + if (stepArgs != null) { + transArgs.putAll(stepArgs); + } + } + + // OK, so perhaps, we can use the arguments from a previous execution? + String[] saved = Props.isInitialized() ? Props.getInstance().getLastArguments() : null; + + // Set the default values on it... + // Also change the name to "Argument 1" .. "Argument 10" + // + for (String argument : transArgs.keySet()) { + String value = ""; + int argNr = Const.toInt(argument, -1); + if (arguments != null && argNr > 0 && argNr <= arguments.length) { + value = Const.NVL(arguments[argNr - 1], ""); + } + if (value.length() == 0) { // try the saved option... + + if (argNr > 0 && argNr < saved.length && saved[argNr] != null) { + value = saved[argNr - 1]; + } + } + transArgs.put(argument, value); + } + + return transArgs; + } + + /** + * Gets the amount of time (in nano-seconds) to wait while the input buffer is empty. + * + * @return the number of nano-seconds to wait while the input buffer is empty. + */ + public int getSleepTimeEmpty() { + return sleepTimeEmpty; + } + + /** + * Gets the amount of time (in nano-seconds) to wait while the input buffer is full. 
+ * + * @return the number of nano-seconds to wait while the input buffer is full. + */ + public int getSleepTimeFull() { + return sleepTimeFull; + } + + /** + * Sets the amount of time (in nano-seconds) to wait while the input buffer is empty. + * + * @param sleepTimeEmpty the number of nano-seconds to wait while the input buffer is empty. + */ + public void setSleepTimeEmpty(int sleepTimeEmpty) { + this.sleepTimeEmpty = sleepTimeEmpty; + } + + /** + * Sets the amount of time (in nano-seconds) to wait while the input buffer is full. + * + * @param sleepTimeFull the number of nano-seconds to wait while the input buffer is full. + */ + public void setSleepTimeFull(int sleepTimeFull) { + this.sleepTimeFull = sleepTimeFull; + } + + /** + * This method asks all steps in the transformation whether or not the specified database connection is used. The + * connection is used in the transformation if any of the steps uses it or if it is being used to log to. + * + * @param databaseMeta The connection to check + * @return true if the connection is used in this transformation. + */ + public boolean isDatabaseConnectionUsed(DatabaseMeta databaseMeta) { + for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + DatabaseMeta[] dbs = stepMeta.getStepMetaInterface().getUsedDatabaseConnections(); + for (int d = 0; d < dbs.length; d++) { + if (dbs[d].equals(databaseMeta)) { + return true; + } + } + } + + if (transLogTable.getDatabaseMeta() != null && transLogTable.getDatabaseMeta().equals(databaseMeta)) { + return true; + } + + return false; + } + + /* + * public List getInputFiles() { return inputFiles; } + * + * public void setInputFiles(List inputFiles) { this.inputFiles = inputFiles; } */ - public SlaveStepCopyPartitionDistribution getSlaveStepCopyPartitionDistribution() { - return slaveStepCopyPartitionDistribution; - } - /** - * Sets the slave step copy partition distribution. - * - * @param slaveStepCopyPartitionDistribution - * the slaveStepCopyPartitionDistribution to set - */ - public void setSlaveStepCopyPartitionDistribution( - SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution ) { - this.slaveStepCopyPartitionDistribution = slaveStepCopyPartitionDistribution; - } + /** + * Gets a list of all the strings used in this transformation. The parameters indicate which collections to search and + * which to exclude. + * + * @param searchSteps true if steps should be searched, false otherwise + * @param searchDatabases true if databases should be searched, false otherwise + * @param searchNotes true if notes should be searched, false otherwise + * @param includePasswords true if passwords should be searched, false otherwise + * @return a list of search results for strings used in the transformation. + */ + public List getStringList(boolean searchSteps, boolean searchDatabases, boolean searchNotes, + boolean includePasswords) { + List stringList = new ArrayList(); + + if (searchSteps) { + // Loop over all steps in the transformation and see what the used vars are... 
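+        /*
+         * Editor's note -- illustrative sketch only, not part of the upstream Pentaho source.
+         * The StringSearchResult entries collected below feed the metadata search dialog and
+         * getUsedVariables() further down. A minimal headless use, assuming the usual accessors
+         * on StringSearchResult:
+         *
+         *   for (Object r : transMeta.getStringList(true, true, false)) {
+         *     StringSearchResult result = (StringSearchResult) r;
+         *     System.out.println(result.getFieldName() + " -> " + result.getString());
+         *   }
+         *
+         * Database passwords are only included when the four-argument variant is called with
+         * includePasswords = true.
+         */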
+ for (int i = 0; i < nrSteps(); i++) { + StepMeta stepMeta = getStep(i); + stringList.add(new StringSearchResult(stepMeta.getName(), stepMeta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.StepName"))); + if (stepMeta.getDescription() != null) { + stringList.add(new StringSearchResult(stepMeta.getDescription(), stepMeta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.StepDescription"))); + } + StepMetaInterface metaInterface = stepMeta.getStepMetaInterface(); + StringSearcher.findMetaData(metaInterface, 1, stringList, stepMeta, this); + } + } + + // Loop over all steps in the transformation and see what the used vars are... + if (searchDatabases) { + for (int i = 0; i < nrDatabases(); i++) { + DatabaseMeta meta = getDatabase(i); + stringList.add(new StringSearchResult(meta.getName(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabaseConnectionName"))); + if (meta.getHostname() != null) { + stringList.add(new StringSearchResult(meta.getHostname(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabaseHostName"))); + } + if (meta.getDatabaseName() != null) { + stringList.add(new StringSearchResult(meta.getDatabaseName(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabaseName"))); + } + if (meta.getUsername() != null) { + stringList.add(new StringSearchResult(meta.getUsername(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabaseUsername"))); + } + if (meta.getPluginId() != null) { + stringList.add(new StringSearchResult(meta.getPluginId(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabaseTypeDescription"))); + } + if (meta.getDatabasePortNumberString() != null) { + stringList.add(new StringSearchResult(meta.getDatabasePortNumberString(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabasePort"))); + } + if (meta.getServername() != null) { + stringList.add(new StringSearchResult(meta.getServername(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabaseServer"))); + } + if (includePasswords) { + if (meta.getPassword() != null) { + stringList.add(new StringSearchResult(meta.getPassword(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.DatabasePassword"))); + } + } + } + } + + // Loop over all steps in the transformation and see what the used vars are... + if (searchNotes) { + for (int i = 0; i < nrNotes(); i++) { + NotePadMeta meta = getNote(i); + if (meta.getNote() != null) { + stringList.add(new StringSearchResult(meta.getNote(), meta, this, + BaseMessages.getString(PKG, "TransMeta.SearchMetadata.NotepadText"))); + } + } + } + + return stringList; + } + + /** + * Get a list of all the strings used in this transformation. The parameters indicate which collections to search and + * which to exclude. + * + * @param searchSteps true if steps should be searched, false otherwise + * @param searchDatabases true if databases should be searched, false otherwise + * @param searchNotes true if notes should be searched, false otherwise + * @return a list of search results for strings used in the transformation. + */ + public List getStringList(boolean searchSteps, boolean searchDatabases, boolean searchNotes) { + return getStringList(searchSteps, searchDatabases, searchNotes, false); + } + + /** + * Gets a list of the used variables in this transformation. + * + * @return a list of the used variables in this transformation. 
+ */ + public List getUsedVariables() { + // Get the list of Strings. + List stringList = getStringList(true, true, false, true); + + List varList = new ArrayList(); + + // Look around in the strings, see what we find... + for (int i = 0; i < stringList.size(); i++) { + StringSearchResult result = stringList.get(i); + StringUtil.getUsedVariables(result.getString(), varList, false); + } + + return varList; + } + + /** + * Gets the previous result. + * + * @return the previous Result. + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public Result getPreviousResult() { + return previousResult; + } + + /** + * Sets the previous result. + * + * @param previousResult The previous Result to set. + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public void setPreviousResult(Result previousResult) { + this.previousResult = previousResult; + } + + /** + * Gets a list of the files in the result. + * + * @return a list of ResultFiles. + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public List getResultFiles() { + return resultFiles; + } + + /** + * Sets the list of the files in the result. + * + * @param resultFiles The list of ResultFiles to set. + * @deprecated this was moved to Trans to keep the metadata stateless + */ + @Deprecated + public void setResultFiles(List resultFiles) { + this.resultFiles = resultFiles; + } + + /** + * Gets a list of partition schemas for this transformation. + * + * @return a list of PartitionSchemas + */ + public List getPartitionSchemas() { + return partitionSchemas; + } + + /** + * Sets the list of partition schemas for this transformation. + * + * @param partitionSchemas the list of PartitionSchemas to set + */ + public void setPartitionSchemas(List partitionSchemas) { + this.partitionSchemas = partitionSchemas; + } + + /** + * Gets the partition schemas' names. + * + * @return a String array containing the available partition schema names. + */ + public String[] getPartitionSchemasNames() { + String[] names = new String[partitionSchemas.size()]; + for (int i = 0; i < names.length; i++) { + names[i] = partitionSchemas.get(i).getName(); + } + return names; + } + + /** + * Checks if is feedback shown. + * + * @return true if feedback is shown, false otherwise + */ + public boolean isFeedbackShown() { + return feedbackShown; + } + + /** + * Sets whether the feedback should be shown. + * + * @param feedbackShown true if feedback should be shown, false otherwise + */ + public void setFeedbackShown(boolean feedbackShown) { + this.feedbackShown = feedbackShown; + } + + /** + * Gets the feedback size. + * + * @return the feedback size + */ + public int getFeedbackSize() { + return feedbackSize; + } + + /** + * Sets the feedback size. + * + * @param feedbackSize the feedback size to set + */ + public void setFeedbackSize(int feedbackSize) { + this.feedbackSize = feedbackSize; + } + + /** + * Checks if the transformation is using unique database connections. + * + * @return true if the transformation is using unique database connections, false otherwise + */ + public boolean isUsingUniqueConnections() { + return usingUniqueConnections; + } + + /** + * Sets whether the transformation is using unique database connections. 
+ * + * @param usingUniqueConnections true if the transformation is using unique database connections, false otherwise + */ + public void setUsingUniqueConnections(boolean usingUniqueConnections) { + this.usingUniqueConnections = usingUniqueConnections; + } + + /** + * Gets a list of the cluster schemas used by the transformation. + * + * @return a list of ClusterSchemas + */ + public List getClusterSchemas() { + return clusterSchemas; + } + + /** + * Sets list of the cluster schemas used by the transformation. + * + * @param clusterSchemas the list of ClusterSchemas to set + */ + public void setClusterSchemas(List clusterSchemas) { + this.clusterSchemas = clusterSchemas; + } + + /** + * Gets the cluster schema names. + * + * @return a String array containing the cluster schemas' names + */ + public String[] getClusterSchemaNames() { + String[] names = new String[clusterSchemas.size()]; + for (int i = 0; i < names.length; i++) { + names[i] = clusterSchemas.get(i).getName(); + } + return names; + } + + /** + * Find a partition schema using its name. + * + * @param name The name of the partition schema to look for. + * @return the partition with the specified name of null if nothing was found + */ + public PartitionSchema findPartitionSchema(String name) { + for (int i = 0; i < partitionSchemas.size(); i++) { + PartitionSchema schema = partitionSchemas.get(i); + if (schema.getName().equalsIgnoreCase(name)) { + return schema; + } + } + return null; + } + + /** + * Find a clustering schema using its name. + * + * @param name The name of the clustering schema to look for. + * @return the cluster schema with the specified name of null if nothing was found + */ + public ClusterSchema findClusterSchema(String name) { + for (int i = 0; i < clusterSchemas.size(); i++) { + ClusterSchema schema = clusterSchemas.get(i); + if (schema.getName().equalsIgnoreCase(name)) { + return schema; + } + } + return null; + } + + /** + * Add a new partition schema to the transformation if that didn't exist yet. Otherwise, replace it. + * + * @param partitionSchema The partition schema to be added. + */ + public void addOrReplacePartitionSchema(PartitionSchema partitionSchema) { + int index = partitionSchemas.indexOf(partitionSchema); + if (index < 0) { + partitionSchemas.add(partitionSchema); + } else { + PartitionSchema previous = partitionSchemas.get(index); + previous.replaceMeta(partitionSchema); + } + setChanged(); + } + + /** + * Add a new cluster schema to the transformation if that didn't exist yet. Otherwise, replace it. + * + * @param clusterSchema The cluster schema to be added. + */ + public void addOrReplaceClusterSchema(ClusterSchema clusterSchema) { + int index = clusterSchemas.indexOf(clusterSchema); + if (index < 0) { + clusterSchemas.add(clusterSchema); + } else { + ClusterSchema previous = clusterSchemas.get(index); + previous.replaceMeta(clusterSchema); + } + setChanged(); + } + + /** + * Save shared objects, including databases, steps, partition schemas, slave servers, and cluster schemas, to a file + * + * @throws KettleException the kettle exception + * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() + * @see org.pentaho.di.shared.SharedObjects#saveToFile() + */ + public void saveSharedObjects() throws KettleException { + try { + // Save the meta store shared objects... + // + saveMetaStoreObjects(repository, metaStore); + + // Load all the shared objects... 
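+            /*
+             * Editor's note -- illustrative sketch only, not part of the upstream Pentaho source.
+             * Only objects explicitly flagged as shared are rewritten into the shared objects file
+             * (sharedObjectsFile, which typically resolves to shared.xml in the user's .kettle
+             * directory). For example, to promote a connection so other transformations can see it:
+             *
+             *   DatabaseMeta db = transMeta.findDatabase("warehouse");   // "warehouse" is a hypothetical name
+             *   if (db != null) {
+             *     db.setShared(true);
+             *     transMeta.saveSharedObjects();
+             *   }
+             */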
+ String soFile = environmentSubstitute(sharedObjectsFile); + SharedObjects sharedObjects = new SharedObjects(soFile); + + // Now overwrite the objects in there + List shared = new ArrayList(); + shared.addAll(databases); + shared.addAll(steps); + shared.addAll(partitionSchemas); + shared.addAll(slaveServers); + shared.addAll(clusterSchemas); + + // The databases connections... + for (SharedObjectInterface sharedObject : shared) { + if (sharedObject.isShared()) { + sharedObjects.storeObject(sharedObject); + } + } + + // Save the objects + sharedObjects.saveToFile(); + } catch (Exception e) { + throw new KettleException("Unable to save shared ojects", e); + } + } + + /** + * Checks whether the transformation is using thread priority management. + * + * @return true if the transformation is using thread priority management, false otherwise + */ + public boolean isUsingThreadPriorityManagment() { + return usingThreadPriorityManagment; + } + + /** + * Sets whether the transformation is using thread priority management. + * + * @param usingThreadPriorityManagment true if the transformation is using thread priority management, false otherwise + */ + public void setUsingThreadPriorityManagment(boolean usingThreadPriorityManagment) { + this.usingThreadPriorityManagment = usingThreadPriorityManagment; + } + + /** + * Check a step to see if there are no multiple steps to read from. If so, check to see if the receiving rows are all + * the same in layout. We only want to ONLY use the DBCache for this to prevent GUI stalls. + * + * @param stepMeta the step to check + * @param monitor the monitor + * @throws KettleRowException in case we detect a row mixing violation + */ + public void checkRowMixingStatically(StepMeta stepMeta, ProgressMonitorListener monitor) throws KettleRowException { + int nrPrevious = findNrPrevSteps(stepMeta); + if (nrPrevious > 1) { + RowMetaInterface referenceRow = null; + // See if all previous steps send out the same rows... + for (int i = 0; i < nrPrevious; i++) { + StepMeta previousStep = findPrevStep(stepMeta, i); + try { + RowMetaInterface row = getStepFields(previousStep, monitor); // Throws KettleStepException + if (referenceRow == null) { + referenceRow = row; + } else if (!stepMeta.getStepMetaInterface().excludeFromRowLayoutVerification()) { + BaseStep.safeModeChecking(referenceRow, row); + } + } catch (KettleStepException e) { + // We ignore this one because we are in the process of designing the transformation, anything intermediate can + // go wrong. + } + } + } + } + + /** + * Sets the internal kettle variables. + * + * @param var the new internal kettle variables + */ + public void setInternalKettleVariables(VariableSpace var) { + setInternalFilenameKettleVariables(var); + setInternalNameKettleVariable(var); + + // The name of the directory in the repository + // + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, + directory != null ? directory.getPath() : ""); + + boolean hasRepoDir = getRepositoryDirectory() != null && getRepository() != null; + + if (hasRepoDir) { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, + variables.getVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY)); + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, + variables.getVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY)); + } + + // Here we don't remove the job specific parameters, as they may come in handy. 
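+        /*
+         * Editor's note -- illustrative sketch only, not part of the upstream Pentaho source.
+         * The variables set in this method are what expressions such as
+         * ${Internal.Transformation.Filename.Directory} and
+         * ${Internal.Transformation.Repository.Directory} resolve to, e.g. a step configured with
+         *
+         *   ${Internal.Transformation.Filename.Directory}/input.csv      (hypothetical file name)
+         *
+         * The parent-job placeholders assigned below are defaults only; when the transformation is
+         * actually started from a job, the parent Job supplies the real values.
+         */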
+ // + if (variables.getVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY) == null) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "Parent Job File Directory"); + } + if (variables.getVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME) == null) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_FILENAME_NAME, "Parent Job Filename"); + } + if (variables.getVariable(Const.INTERNAL_VARIABLE_JOB_NAME) == null) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_NAME, "Parent Job Name"); + } + if (variables.getVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY) == null) { + variables.setVariable(Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "Parent Job Repository Directory"); + } + + variables.setVariable(Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, + variables.getVariable(repository != null ? Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY + : Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY)); + } + + /** + * Sets the internal name kettle variable. + * + * @param var the new internal name kettle variable + */ + protected void setInternalNameKettleVariable(VariableSpace var) { + // The name of the transformation + // + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL(name, "")); + } + + /** + * Sets the internal filename kettle variables. + * + * @param var the new internal filename kettle variables + */ + protected void setInternalFilenameKettleVariables(VariableSpace var) { + // If we have a filename that's defined, set variables. If not, clear them. + // + if (!Const.isEmpty(filename)) { + try { + FileObject fileObject = KettleVFS.getFileObject(filename, var); + FileName fileName = fileObject.getName(); + + // The filename of the transformation + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName()); + + // The directory of the transformation + FileName fileDir = fileName.getParent(); + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI()); + } catch (KettleFileException e) { + log.logError("Unexpected error setting internal filename variables!", e); + + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, ""); + } + } else { + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, ""); + variables.setVariable(Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, ""); + } + + } + + /** + * Finds the mapping input step with the specified name. If no mapping input step is found, null is returned + * + * @param stepname the name to search for + * @return the step meta-data corresponding to the desired mapping input step, or null if no step was found + * @throws KettleStepException if any errors occur during the search + */ + public StepMeta findMappingInputStep(String stepname) throws KettleStepException { + if (!Const.isEmpty(stepname)) { + StepMeta stepMeta = findStep(stepname); // TODO verify that it's a mapping input!! + if (stepMeta == null) { + throw new KettleStepException(BaseMessages.getString( + PKG, "TransMeta.Exception.StepNameNotFound", stepname)); + } + return stepMeta; + } else { + // Find the first mapping input step that fits the bill. 
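+            /*
+             * Editor's note: although the javadoc above says null is returned when nothing is found,
+             * both lookup branches signal a missing or ambiguous step with a KettleStepException,
+             * so callers (for example the Mapping / sub-transformation step) can rely on the
+             * exception instead of a null check. Exactly one step with id "MappingInput" is
+             * accepted when no name is given.
+             */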
+ StepMeta stepMeta = null; + for (StepMeta mappingStep : steps) { + if (mappingStep.getStepID().equals("MappingInput")) { + if (stepMeta == null) { + stepMeta = mappingStep; + } else if (stepMeta != null) { + throw new KettleStepException(BaseMessages.getString( + PKG, "TransMeta.Exception.OnlyOneMappingInputStepAllowed", "2")); + } + } + } + if (stepMeta == null) { + throw new KettleStepException(BaseMessages.getString( + PKG, "TransMeta.Exception.OneMappingInputStepRequired")); + } + return stepMeta; + } + } + + /** + * Finds the mapping output step with the specified name. If no mapping output step is found, null is returned. + * + * @param stepname the name to search for + * @return the step meta-data corresponding to the desired mapping input step, or null if no step was found + * @throws KettleStepException if any errors occur during the search + */ + public StepMeta findMappingOutputStep(String stepname) throws KettleStepException { + if (!Const.isEmpty(stepname)) { + StepMeta stepMeta = findStep(stepname); // TODO verify that it's a mapping output step. + if (stepMeta == null) { + throw new KettleStepException(BaseMessages.getString( + PKG, "TransMeta.Exception.StepNameNotFound", stepname)); + } + return stepMeta; + } else { + // Find the first mapping output step that fits the bill. + StepMeta stepMeta = null; + for (StepMeta mappingStep : steps) { + if (mappingStep.getStepID().equals("MappingOutput")) { + if (stepMeta == null) { + stepMeta = mappingStep; + } else if (stepMeta != null) { + throw new KettleStepException(BaseMessages.getString( + PKG, "TransMeta.Exception.OnlyOneMappingOutputStepAllowed", "2")); + } + } + } + if (stepMeta == null) { + throw new KettleStepException(BaseMessages.getString( + PKG, "TransMeta.Exception.OneMappingOutputStepRequired")); + } + return stepMeta; + } + } + + /** + * Gets a list of the resource dependencies. + * + * @return a list of ResourceReferences + */ + public List getResourceDependencies() { + List resourceReferences = new ArrayList(); + + for (StepMeta stepMeta : steps) { + resourceReferences.addAll(stepMeta.getResourceDependencies(this)); + } - /** - * Finds the first used cluster schema. - * - * @return the first used cluster schema - */ - public ClusterSchema findFirstUsedClusterSchema() { - for ( StepMeta stepMeta : steps ) { - if ( stepMeta.getClusterSchema() != null ) { - return stepMeta.getClusterSchema(); - } + return resourceReferences; } - return null; - } - /** - * Checks whether the transformation is a slave transformation. - * - * @return true if the transformation is a slave transformation, false otherwise - */ - public boolean isSlaveTransformation() { - return slaveTransformation; - } + /** + * Exports the specified objects to a flat-file system, adding content with filename keys to a set of definitions. The + * supplied resource naming interface allows the object to name appropriately without worrying about those parts of + * the implementation specific details. + * + * @param space the variable space to use + * @param definitions + * @param resourceNamingInterface + * @param repository The repository to optionally load other resources from (to be converted to XML) + * @param metaStore the metaStore in which non-kettle metadata could reside. 
+ * @return the filename of the exported resource + */ + public String exportResources(VariableSpace space, Map definitions, + ResourceNamingInterface resourceNamingInterface, Repository repository, IMetaStore metaStore) throws KettleException { - /** - * Sets whether the transformation is a slave transformation. - * - * @param slaveTransformation - * true if the transformation is a slave transformation, false otherwise - */ - public void setSlaveTransformation( boolean slaveTransformation ) { - this.slaveTransformation = slaveTransformation; - } + try { + // Handle naming for both repository and XML bases resources... + // + String baseName; + String originalPath; + String fullname; + String extension = "ktr"; + if (Const.isEmpty(getFilename())) { + // Assume repository... + // + originalPath = directory.getPath(); + baseName = getName(); + fullname = + directory.getPath() + + (directory.getPath().endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR) + ? "" : RepositoryDirectory.DIRECTORY_SEPARATOR) + getName() + "." + extension; // + } else { + // Assume file + // + FileObject fileObject = KettleVFS.getFileObject(space.environmentSubstitute(getFilename()), space); + originalPath = fileObject.getParent().getURL().toString(); + baseName = fileObject.getName().getBaseName(); + fullname = fileObject.getURL().toString(); + } - /** - * Checks whether the transformation is capturing step performance snapshots. - * - * @return true if the transformation is capturing step performance snapshots, false otherwise - */ - public boolean isCapturingStepPerformanceSnapShots() { - return capturingStepPerformanceSnapShots; - } + String + exportFileName = + resourceNamingInterface + .nameResource(baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.TRANSFORMATION); + ResourceDefinition definition = definitions.get(exportFileName); + if (definition == null) { + // If we do this once, it will be plenty :-) + // + TransMeta transMeta = (TransMeta) this.realClone(false); + // transMeta.copyVariablesFrom(space); + + // Add used resources, modify transMeta accordingly + // Go through the list of steps, etc. + // These critters change the steps in the cloned TransMeta + // At the end we make a new XML version of it in "exported" + // format... + + // loop over steps, databases will be exported to XML anyway. + // + for (StepMeta stepMeta : transMeta.getSteps()) { + stepMeta.exportResources(space, definitions, resourceNamingInterface, repository, metaStore); + } - /** - * Sets whether the transformation is capturing step performance snapshots. - * - * @param capturingStepPerformanceSnapShots - * true if the transformation is capturing step performance snapshots, false otherwise - */ - public void setCapturingStepPerformanceSnapShots( boolean capturingStepPerformanceSnapShots ) { - this.capturingStepPerformanceSnapShots = capturingStepPerformanceSnapShots; - } + // Change the filename, calling this sets internal variables + // inside of the transformation. + // + transMeta.setFilename(exportFileName); + + // All objects get re-located to the root folder + // + transMeta.setRepositoryDirectory(new RepositoryDirectory()); + + // Set a number of parameters for all the data files referenced so far... 
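+                /*
+                 * Editor's note -- illustrative sketch only, not part of the upstream Pentaho source.
+                 * exportResources(...) is rarely called directly; utilities such as
+                 * org.pentaho.di.resource.ResourceUtil invoke it while packaging a transformation for
+                 * execution on a slave server. Conceptually:
+                 *
+                 *   Map definitions = new HashMap();
+                 *   String exportedName = transMeta.exportResources(
+                 *       transMeta, definitions, namingInterface, repository, metaStore);
+                 *   // "namingInterface" stands for whatever ResourceNamingInterface implementation
+                 *   // the caller supplies; after the call, "definitions" holds one ResourceDefinition
+                 *   // (exported XML plus its origin) per referenced resource, and exportedName is the
+                 *   // file name under which this transformation appears in the export archive.
+                 */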
+ // + Map directoryMap = resourceNamingInterface.getDirectoryMap(); + if (directoryMap != null) { + for (String directory : directoryMap.keySet()) { + String parameterName = directoryMap.get(directory); + transMeta.addParameterDefinition(parameterName, directory, "Data file path discovered during export"); + } + } - /** - * Gets the step performance capturing delay. - * - * @return the step performance capturing delay - */ - public long getStepPerformanceCapturingDelay() { - return stepPerformanceCapturingDelay; - } + // At the end, add ourselves to the map... + // + String transMetaContent = transMeta.getXML(); - /** - * Sets the step performance capturing delay. - * - * @param stepPerformanceCapturingDelay - * the stepPerformanceCapturingDelay to set - */ - public void setStepPerformanceCapturingDelay( long stepPerformanceCapturingDelay ) { - this.stepPerformanceCapturingDelay = stepPerformanceCapturingDelay; - } + definition = new ResourceDefinition(exportFileName, transMetaContent); - /** - * Gets the step performance capturing size limit. - * - * @return the step performance capturing size limit - */ - public String getStepPerformanceCapturingSizeLimit() { - return stepPerformanceCapturingSizeLimit; - } + // Also remember the original filename (if any), including variables etc. + // + if (Const.isEmpty(this.getFilename())) { // Repository + definition.setOrigin(fullname); + } else { + definition.setOrigin(this.getFilename()); + } - /** - * Sets the step performance capturing size limit. - * - * @param stepPerformanceCapturingSizeLimit - * the step performance capturing size limit to set - */ - public void setStepPerformanceCapturingSizeLimit( String stepPerformanceCapturingSizeLimit ) { - this.stepPerformanceCapturingSizeLimit = stepPerformanceCapturingSizeLimit; - } + definitions.put(fullname, definition); + } + return exportFileName; + } catch (FileSystemException e) { + throw new KettleException(BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename()), e); + } catch (KettleFileException e) { + throw new KettleException(BaseMessages.getString( + PKG, "TransMeta.Exception.ErrorOpeningOrValidatingTheXMLFile", getFilename()), e); + } + } - /** - * Clears the step fields and loop caches. - */ - public void clearCaches() { - clearStepFieldsCachce(); - clearLoopCache(); - } + /** + * Gets the slave step copy partition distribution. + * + * @return the SlaveStepCopyPartitionDistribution + */ + public SlaveStepCopyPartitionDistribution getSlaveStepCopyPartitionDistribution() { + return slaveStepCopyPartitionDistribution; + } - /** - * Clears the step fields cachce. - */ - private void clearStepFieldsCachce() { - stepsFieldsCache.clear(); - } + /** + * Sets the slave step copy partition distribution. + * + * @param slaveStepCopyPartitionDistribution the slaveStepCopyPartitionDistribution to set + */ + public void setSlaveStepCopyPartitionDistribution( + SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution) { + this.slaveStepCopyPartitionDistribution = slaveStepCopyPartitionDistribution; + } - /** - * Clears the loop cache. - */ - private void clearLoopCache() { - loopCache.clear(); - } + /** + * Finds the first used cluster schema. + * + * @return the first used cluster schema + */ + public ClusterSchema findFirstUsedClusterSchema() { + for (StepMeta stepMeta : steps) { + if (stepMeta.getClusterSchema() != null) { + return stepMeta.getClusterSchema(); + } + } + return null; + } - /** - * Gets the repository element type. 
- * - * @return the repository element type - * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() - */ - public RepositoryObjectType getRepositoryElementType() { - return REPOSITORY_ELEMENT_TYPE; - } + /** + * Checks whether the transformation is a slave transformation. + * + * @return true if the transformation is a slave transformation, false otherwise + */ + public boolean isSlaveTransformation() { + return slaveTransformation; + } - /** - * Gets the log channel. - * - * @return the log channel - */ - public LogChannelInterface getLogChannel() { - return log; - } + /** + * Sets whether the transformation is a slave transformation. + * + * @param slaveTransformation true if the transformation is a slave transformation, false otherwise + */ + public void setSlaveTransformation(boolean slaveTransformation) { + this.slaveTransformation = slaveTransformation; + } - /** - * Gets the log channel ID. - * - * @return the log channel ID - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() - */ - public String getLogChannelId() { - return log.getLogChannelId(); - } + /** + * Checks whether the transformation is capturing step performance snapshots. + * + * @return true if the transformation is capturing step performance snapshots, false otherwise + */ + public boolean isCapturingStepPerformanceSnapShots() { + return capturingStepPerformanceSnapShots; + } - /** - * Gets the object type. - * - * @return the object type - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() - */ - public LoggingObjectType getObjectType() { - return LoggingObjectType.TRANSMETA; - } + /** + * Sets whether the transformation is capturing step performance snapshots. + * + * @param capturingStepPerformanceSnapShots true if the transformation is capturing step performance snapshots, false otherwise + */ + public void setCapturingStepPerformanceSnapShots(boolean capturingStepPerformanceSnapShots) { + this.capturingStepPerformanceSnapShots = capturingStepPerformanceSnapShots; + } - /** - * Gets the log table for the transformation. - * - * @return the log table for the transformation - */ - public TransLogTable getTransLogTable() { - return transLogTable; - } + /** + * Gets the step performance capturing delay. + * + * @return the step performance capturing delay + */ + public long getStepPerformanceCapturingDelay() { + return stepPerformanceCapturingDelay; + } - /** - * Sets the log table for the transformation. - * - * @param the - * log table to set - */ - public void setTransLogTable( TransLogTable transLogTable ) { - this.transLogTable = transLogTable; - } + /** + * Sets the step performance capturing delay. + * + * @param stepPerformanceCapturingDelay the stepPerformanceCapturingDelay to set + */ + public void setStepPerformanceCapturingDelay(long stepPerformanceCapturingDelay) { + this.stepPerformanceCapturingDelay = stepPerformanceCapturingDelay; + } - /** - * Gets the performance log table for the transformation. - * - * @return the performance log table for the transformation - */ - public PerformanceLogTable getPerformanceLogTable() { - return performanceLogTable; - } + /** + * Gets the step performance capturing size limit. + * + * @return the step performance capturing size limit + */ + public String getStepPerformanceCapturingSizeLimit() { + return stepPerformanceCapturingSizeLimit; + } - /** - * Sets the performance log table for the transformation. 
- * - * @param performanceLogTable - * the performance log table to set - */ - public void setPerformanceLogTable( PerformanceLogTable performanceLogTable ) { - this.performanceLogTable = performanceLogTable; - } + /** + * Sets the step performance capturing size limit. + * + * @param stepPerformanceCapturingSizeLimit the step performance capturing size limit to set + */ + public void setStepPerformanceCapturingSizeLimit(String stepPerformanceCapturingSizeLimit) { + this.stepPerformanceCapturingSizeLimit = stepPerformanceCapturingSizeLimit; + } - /** - * Gets the step log table for the transformation. - * - * @return the step log table for the transformation - */ - public StepLogTable getStepLogTable() { - return stepLogTable; - } + /** + * Clears the step fields and loop caches. + */ + public void clearCaches() { + clearStepFieldsCachce(); + clearLoopCache(); + } - /** - * Sets the step log table for the transformation. - * - * @param stepLogTable - * the step log table to set - */ - public void setStepLogTable( StepLogTable stepLogTable ) { - this.stepLogTable = stepLogTable; - } + /** + * Clears the step fields cachce. + */ + private void clearStepFieldsCachce() { + stepsFieldsCache.clear(); + } - /** - * Gets a list of the log tables (transformation, step, performance, channel) for the transformation. - * - * @return a list of LogTableInterfaces for the transformation - */ - public List getLogTables() { - List logTables = new ArrayList(); - logTables.add( transLogTable ); - logTables.add( stepLogTable ); - logTables.add( performanceLogTable ); - logTables.add( channelLogTable ); - logTables.add( metricsLogTable ); - return logTables; - } - - /** - * Gets the transformation type. - * - * @return the transformationType - */ - public TransformationType getTransformationType() { - return transformationType; - } + /** + * Clears the loop cache. + */ + private void clearLoopCache() { + loopCache.clear(); + } - /** - * Sets the transformation type. - * - * @param transformationType - * the transformationType to set - */ - public void setTransformationType( TransformationType transformationType ) { - this.transformationType = transformationType; - } + /** + * Gets the repository element type. + * + * @return the repository element type + * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() + */ + public RepositoryObjectType getRepositoryElementType() { + return REPOSITORY_ELEMENT_TYPE; + } - /** - * Utility method to write the XML of this transformation to a file, mostly for testing purposes. - * - * @param filename - * The filename to save to - * @throws KettleXMLException - * in case something goes wrong. - */ - public void writeXML( String filename ) throws KettleXMLException { - FileOutputStream fos = null; - try { - fos = new FileOutputStream( filename ); - fos.write( XMLHandler.getXMLHeader().getBytes( Const.XML_ENCODING ) ); - fos.write( getXML().getBytes( Const.XML_ENCODING ) ); - } catch ( Exception e ) { - throw new KettleXMLException( "Unable to save to XML file '" + filename + "'", e ); - } finally { - if ( fos != null ) { + /** + * Gets the log channel. + * + * @return the log channel + */ + public LogChannelInterface getLogChannel() { + return log; + } + + /** + * Gets the log channel ID. + * + * @return the log channel ID + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() + */ + public String getLogChannelId() { + return log.getLogChannelId(); + } + + /** + * Gets the object type. 
+ * + * @return the object type + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() + */ + public LoggingObjectType getObjectType() { + return LoggingObjectType.TRANSMETA; + } + + /** + * Gets the log table for the transformation. + * + * @return the log table for the transformation + */ + public TransLogTable getTransLogTable() { + return transLogTable; + } + + /** + * Sets the log table for the transformation. + * + * @param the log table to set + */ + public void setTransLogTable(TransLogTable transLogTable) { + this.transLogTable = transLogTable; + } + + /** + * Gets the performance log table for the transformation. + * + * @return the performance log table for the transformation + */ + public PerformanceLogTable getPerformanceLogTable() { + return performanceLogTable; + } + + /** + * Sets the performance log table for the transformation. + * + * @param performanceLogTable the performance log table to set + */ + public void setPerformanceLogTable(PerformanceLogTable performanceLogTable) { + this.performanceLogTable = performanceLogTable; + } + + /** + * Gets the step log table for the transformation. + * + * @return the step log table for the transformation + */ + public StepLogTable getStepLogTable() { + return stepLogTable; + } + + /** + * Sets the step log table for the transformation. + * + * @param stepLogTable the step log table to set + */ + public void setStepLogTable(StepLogTable stepLogTable) { + this.stepLogTable = stepLogTable; + } + + /** + * Gets a list of the log tables (transformation, step, performance, channel) for the transformation. + * + * @return a list of LogTableInterfaces for the transformation + */ + public List getLogTables() { + List logTables = new ArrayList(); + logTables.add(transLogTable); + logTables.add(stepLogTable); + logTables.add(performanceLogTable); + logTables.add(channelLogTable); + logTables.add(metricsLogTable); + return logTables; + } + + /** + * Gets the transformation type. + * + * @return the transformationType + */ + public TransformationType getTransformationType() { + return transformationType; + } + + /** + * Sets the transformation type. + * + * @param transformationType the transformationType to set + */ + public void setTransformationType(TransformationType transformationType) { + this.transformationType = transformationType; + } + + /** + * Utility method to write the XML of this transformation to a file, mostly for testing purposes. + * + * @param filename The filename to save to + * @throws KettleXMLException in case something goes wrong. + */ + public void writeXML(String filename) throws KettleXMLException { + FileOutputStream fos = null; try { - fos.close(); - } catch ( IOException e ) { - throw new KettleXMLException( "Unable to close file '" + filename + "'", e ); + fos = new FileOutputStream(filename); + fos.write(XMLHandler.getXMLHeader().getBytes(Const.XML_ENCODING)); + fos.write(getXML().getBytes(Const.XML_ENCODING)); + } catch (Exception e) { + throw new KettleXMLException("Unable to save to XML file '" + filename + "'", e); + } finally { + if (fos != null) { + try { + fos.close(); + } catch (IOException e) { + throw new KettleXMLException("Unable to close file '" + filename + "'", e); + } + } } - } } - } - /** - * Checks whether the transformation has repository references. 
- * - * @return true if the transformation has repository references, false otherwise - */ - public boolean hasRepositoryReferences() { - for ( StepMeta stepMeta : steps ) { - if ( stepMeta.getStepMetaInterface().hasRepositoryReferences() ) { - return true; - } + /** + * Checks whether the transformation has repository references. + * + * @return true if the transformation has repository references, false otherwise + */ + public boolean hasRepositoryReferences() { + for (StepMeta stepMeta : steps) { + if (stepMeta.getStepMetaInterface().hasRepositoryReferences()) { + return true; + } + } + return false; } - return false; - } - /** - * Looks up the references after a repository import. - * - * @param repository - * the repository to reference. - * @throws KettleException - * the kettle exception - */ - public void lookupRepositoryReferences( Repository repository ) throws KettleException { - for ( StepMeta stepMeta : steps ) { - stepMeta.getStepMetaInterface().lookupRepositoryReferences( repository ); + /** + * Looks up the references after a repository import. + * + * @param repository the repository to reference. + * @throws KettleException the kettle exception + */ + public void lookupRepositoryReferences(Repository repository) throws KettleException { + for (StepMeta stepMeta : steps) { + stepMeta.getStepMetaInterface().lookupRepositoryReferences(repository); + } } - } - /** - * @return the metricsLogTable - */ - public MetricsLogTable getMetricsLogTable() { - return metricsLogTable; - } + /** + * @return the metricsLogTable + */ + public MetricsLogTable getMetricsLogTable() { + return metricsLogTable; + } - /** - * @param metricsLogTable - * the metricsLogTable to set - */ - public void setMetricsLogTable( MetricsLogTable metricsLogTable ) { - this.metricsLogTable = metricsLogTable; - } - - @Override - public boolean isGatheringMetrics() { - return log.isGatheringMetrics(); - } - - @Override - public void setGatheringMetrics( boolean gatheringMetrics ) { - log.setGatheringMetrics( gatheringMetrics ); - } - - @Override - public boolean isForcingSeparateLogging() { - return log.isForcingSeparateLogging(); - } - - @Override - public void setForcingSeparateLogging( boolean forcingSeparateLogging ) { - log.setForcingSeparateLogging( forcingSeparateLogging ); - } - - /** - * This method needs to be called to store those objects which are used and referenced in the transformation metadata - * but not saved in the XML serialization. For example, the Kettle data service definition is referenced by name but - * not stored when getXML() is called. - * - * @param metaStore - * The store to save to - * @throws MetaStoreException - * in case there is an error. 
- */ - public void saveMetaStoreObjects( Repository repository, IMetaStore metaStore ) throws MetaStoreException { - - } - - public void addStepChangeListener( StepMetaChangeListenerInterface listener ) { - stepChangeListeners.add( listener ); - } - - public void addStepChangeListener( int p, StepMetaChangeListenerInterface list ) { - int indexListener = -1; - int indexListenerRemove = -1; - StepMeta rewriteStep = steps.get( p ); - StepMetaInterface iface = rewriteStep.getStepMetaInterface(); - if ( iface instanceof StepMetaChangeListenerInterface ) { - for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) { - indexListener++; - if ( listener.equals( iface ) ) { - indexListenerRemove = indexListener; - } - } - if ( indexListenerRemove >= 0 ) { - stepChangeListeners.add( indexListenerRemove, list ); - } else if ( stepChangeListeners.size() == 0 && p == 0 ) { - stepChangeListeners.add( list ); - } - } - } - - public void removeStepChangeListener( StepMetaChangeListenerInterface list ) { - int indexListener = -1; - int indexListenerRemove = -1; - for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) { - indexListener++; - if ( listener.equals( list ) ) { - indexListenerRemove = indexListener; - } - } - if ( indexListenerRemove >= 0 ) { - stepChangeListeners.remove( indexListenerRemove ); - } - } - - public void notifyAllListeners( StepMeta oldMeta, StepMeta newMeta ) { - for ( StepMetaChangeListenerInterface listener : stepChangeListeners ) { - listener.onStepChange( this, oldMeta, newMeta ); - } - } - - public boolean containsStepMeta( StepMeta stepMeta ) { - return steps.contains( stepMeta ); - } - - public List getMissingTrans() { - return missingTrans; - } - - public void addMissingTrans( MissingTrans trans ) { - if ( missingTrans == null ) { - missingTrans = new ArrayList(); - } - missingTrans.add( trans ); - } - - public void removeMissingTrans( MissingTrans trans ) { - if ( missingTrans != null && trans != null && missingTrans.contains( trans ) ) { - missingTrans.remove( trans ); - } - } - - public boolean hasMissingPlugins() { - return missingTrans != null && !missingTrans.isEmpty(); - } + /** + * @param metricsLogTable the metricsLogTable to set + */ + public void setMetricsLogTable(MetricsLogTable metricsLogTable) { + this.metricsLogTable = metricsLogTable; + } + + @Override + public boolean isGatheringMetrics() { + return log.isGatheringMetrics(); + } + + @Override + public void setGatheringMetrics(boolean gatheringMetrics) { + log.setGatheringMetrics(gatheringMetrics); + } + + @Override + public boolean isForcingSeparateLogging() { + return log.isForcingSeparateLogging(); + } + + @Override + public void setForcingSeparateLogging(boolean forcingSeparateLogging) { + log.setForcingSeparateLogging(forcingSeparateLogging); + } + + /** + * This method needs to be called to store those objects which are used and referenced in the transformation metadata + * but not saved in the XML serialization. For example, the Kettle data service definition is referenced by name but + * not stored when getXML() is called. + * + * @param metaStore The store to save to + * @throws MetaStoreException in case there is an error. 
+ */ + public void saveMetaStoreObjects(Repository repository, IMetaStore metaStore) throws MetaStoreException { + + } + + public void addStepChangeListener(StepMetaChangeListenerInterface listener) { + stepChangeListeners.add(listener); + } + + public void addStepChangeListener(int p, StepMetaChangeListenerInterface list) { + int indexListener = -1; + int indexListenerRemove = -1; + StepMeta rewriteStep = steps.get(p); + StepMetaInterface iface = rewriteStep.getStepMetaInterface(); + if (iface instanceof StepMetaChangeListenerInterface) { + for (StepMetaChangeListenerInterface listener : stepChangeListeners) { + indexListener++; + if (listener.equals(iface)) { + indexListenerRemove = indexListener; + } + } + if (indexListenerRemove >= 0) { + stepChangeListeners.add(indexListenerRemove, list); + } else if (stepChangeListeners.size() == 0 && p == 0) { + stepChangeListeners.add(list); + } + } + } + + public void removeStepChangeListener(StepMetaChangeListenerInterface list) { + int indexListener = -1; + int indexListenerRemove = -1; + for (StepMetaChangeListenerInterface listener : stepChangeListeners) { + indexListener++; + if (listener.equals(list)) { + indexListenerRemove = indexListener; + } + } + if (indexListenerRemove >= 0) { + stepChangeListeners.remove(indexListenerRemove); + } + } + + public void notifyAllListeners(StepMeta oldMeta, StepMeta newMeta) { + for (StepMetaChangeListenerInterface listener : stepChangeListeners) { + listener.onStepChange(this, oldMeta, newMeta); + } + } + + public boolean containsStepMeta(StepMeta stepMeta) { + return steps.contains(stepMeta); + } + + public List getMissingTrans() { + return missingTrans; + } + + public void addMissingTrans(MissingTrans trans) { + if (missingTrans == null) { + missingTrans = new ArrayList(); + } + missingTrans.add(trans); + } + + public void removeMissingTrans(MissingTrans trans) { + if (missingTrans != null && trans != null && missingTrans.contains(trans)) { + missingTrans.remove(trans); + } + } + + public boolean hasMissingPlugins() { + return missingTrans != null && !missingTrans.isEmpty(); + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java index d48f367..c6cc0d7 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java @@ -22,8 +22,6 @@ package org.pentaho.di.trans.steps.append; -import java.util.List; - import org.pentaho.di.core.CheckResult; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.database.DatabaseMeta; @@ -39,13 +37,7 @@ import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; import org.pentaho.di.trans.TransMeta.TransformationType; -import org.pentaho.di.trans.step.BaseStepMeta; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepIOMeta; -import org.pentaho.di.trans.step.StepIOMetaInterface; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.errorhandling.Stream; import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; @@ -53,163 +45,165 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.util.List; + /** * @author Sven 
Boden * @since 3-june-2007 */ public class AppendMeta extends BaseStepMeta implements StepMetaInterface { - private static Class PKG = Append.class; // for i18n purposes, needed by Translator2!! - - public AppendMeta() { - super(); // allocate BaseStepMeta - } - - public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { - readData( stepnode ); - } - - public Object clone() { - AppendMeta retval = (AppendMeta) super.clone(); - - return retval; - } - - public String getXML() { - StringBuilder retval = new StringBuilder(); - - List infoStreams = getStepIOMeta().getInfoStreams(); - retval.append( XMLHandler.addTagValue( "head_name", infoStreams.get( 0 ).getStepname() ) ); - retval.append( XMLHandler.addTagValue( "tail_name", infoStreams.get( 1 ).getStepname() ) ); - - return retval.toString(); - } - - private void readData( Node stepnode ) throws KettleXMLException { - try { - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface headStream = infoStreams.get( 0 ); - StreamInterface tailStream = infoStreams.get( 1 ); - headStream.setSubject( XMLHandler.getTagValue( stepnode, "head_name" ) ); - tailStream.setSubject( XMLHandler.getTagValue( stepnode, "tail_name" ) ); - } catch ( Exception e ) { - throw new KettleXMLException( BaseMessages.getString( PKG, "AppendMeta.Exception.UnableToLoadStepInfo" ), e ); - } - } - - public void setDefault() { - } - - public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { - try { - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface headStream = infoStreams.get( 0 ); - StreamInterface tailStream = infoStreams.get( 1 ); - headStream.setSubject( rep.getStepAttributeString( id_step, "head_name" ) ); - tailStream.setSubject( rep.getStepAttributeString( id_step, "tail_name" ) ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( - PKG, "AppendMeta.Exception.UnexpectedErrorReadingStepInfo" ), e ); - } - } - - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { - try { - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface headStream = infoStreams.get( 0 ); - StreamInterface tailStream = infoStreams.get( 1 ); - rep.saveStepAttribute( id_transformation, id_step, "head_name", headStream.getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "tail_name", tailStream.getStepname() ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, "AppendMeta.Exception.UnableToSaveStepInfo" ) - + id_step, e ); - } - } - - @Override - public void searchInfoAndTargetSteps( List steps ) { - for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { - stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); - } - } - - public boolean chosesTargetSteps() { - return false; - } - - public String[] getTargetSteps() { - return null; - } - - public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, - VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { - // We don't have any input fields here in "r" as they are all info fields. - // So we just take the info fields. 
- // - if ( info != null ) { - if ( info.length > 0 && info[0] != null ) { - r.mergeRowMeta( info[0] ); - } - } - } - - public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - CheckResult cr; - - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface headStream = infoStreams.get( 0 ); - StreamInterface tailStream = infoStreams.get( 1 ); - - if ( headStream.getStepname() != null && tailStream.getStepname() != null ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "AppendMeta.CheckResult.SourceStepsOK" ), stepMeta ); - remarks.add( cr ); - } else if ( headStream.getStepname() == null && tailStream.getStepname() == null ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "AppendMeta.CheckResult.SourceStepsMissing" ), stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "AppendMeta.CheckResult.OneSourceStepMissing" ), stepMeta ); - remarks.add( cr ); - } - } - - public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, - Trans trans ) { - return new Append( stepMeta, stepDataInterface, cnr, tr, trans ); - } - - public StepDataInterface getStepData() { - return new AppendData(); - } - - /** - * Returns the Input/Output metadata for this step. - */ - public StepIOMetaInterface getStepIOMeta() { - if ( ioMeta == null ) { - - ioMeta = new StepIOMeta( true, true, false, false, false, false ); - - ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( - PKG, "AppendMeta.InfoStream.FirstStream.Description" ), StreamIcon.INFO, null ) ); - ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( - PKG, "AppendMeta.InfoStream.SecondStream.Description" ), StreamIcon.INFO, null ) ); - } - - return ioMeta; - } - - @Override - public void resetStepIoMeta() { - } - - public TransformationType[] getSupportedTransformationTypes() { - return new TransformationType[] { TransformationType.Normal, }; - } + private static Class PKG = Append.class; // for i18n purposes, needed by Translator2!! 
+ + public AppendMeta() { + super(); // allocate BaseStepMeta + } + + public void loadXML(Node stepnode, List databases, IMetaStore metaStore) throws KettleXMLException { + readData(stepnode); + } + + public Object clone() { + AppendMeta retval = (AppendMeta) super.clone(); + + return retval; + } + + public String getXML() { + StringBuilder retval = new StringBuilder(); + + List infoStreams = getStepIOMeta().getInfoStreams(); + retval.append(XMLHandler.addTagValue("head_name", infoStreams.get(0).getStepname())); + retval.append(XMLHandler.addTagValue("tail_name", infoStreams.get(1).getStepname())); + + return retval.toString(); + } + + private void readData(Node stepnode) throws KettleXMLException { + try { + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get(0); + StreamInterface tailStream = infoStreams.get(1); + headStream.setSubject(XMLHandler.getTagValue(stepnode, "head_name")); + tailStream.setSubject(XMLHandler.getTagValue(stepnode, "tail_name")); + } catch (Exception e) { + throw new KettleXMLException(BaseMessages.getString(PKG, "AppendMeta.Exception.UnableToLoadStepInfo"), e); + } + } + + public void setDefault() { + } + + public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List databases) throws KettleException { + try { + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get(0); + StreamInterface tailStream = infoStreams.get(1); + headStream.setSubject(rep.getStepAttributeString(id_step, "head_name")); + tailStream.setSubject(rep.getStepAttributeString(id_step, "tail_name")); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString( + PKG, "AppendMeta.Exception.UnexpectedErrorReadingStepInfo"), e); + } + } + + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { + try { + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get(0); + StreamInterface tailStream = infoStreams.get(1); + rep.saveStepAttribute(id_transformation, id_step, "head_name", headStream.getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "tail_name", tailStream.getStepname()); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, "AppendMeta.Exception.UnableToSaveStepInfo") + + id_step, e); + } + } + + @Override + public void searchInfoAndTargetSteps(List steps) { + for (StreamInterface stream : getStepIOMeta().getInfoStreams()) { + stream.setStepMeta(StepMeta.findStep(steps, (String) stream.getSubject())); + } + } + + public boolean chosesTargetSteps() { + return false; + } + + public String[] getTargetSteps() { + return null; + } + + public void getFields(RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore) throws KettleStepException { + // We don't have any input fields here in "r" as they are all info fields. + // So we just take the info fields. 
+ // + if (info != null) { + if (info.length > 0 && info[0] != null) { + r.mergeRowMeta(info[0]); + } + } + } + + public void check(List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore) { + CheckResult cr; + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface headStream = infoStreams.get(0); + StreamInterface tailStream = infoStreams.get(1); + + if (headStream.getStepname() != null && tailStream.getStepname() != null) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "AppendMeta.CheckResult.SourceStepsOK"), stepMeta); + remarks.add(cr); + } else if (headStream.getStepname() == null && tailStream.getStepname() == null) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "AppendMeta.CheckResult.SourceStepsMissing"), stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "AppendMeta.CheckResult.OneSourceStepMissing"), stepMeta); + remarks.add(cr); + } + } + + public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans) { + return new Append(stepMeta, stepDataInterface, cnr, tr, trans); + } + + public StepDataInterface getStepData() { + return new AppendData(); + } + + /** + * Returns the Input/Output metadata for this step. + */ + public StepIOMetaInterface getStepIOMeta() { + if (ioMeta == null) { + + ioMeta = new StepIOMeta(true, true, false, false, false, false); + + ioMeta.addStream(new Stream(StreamType.INFO, null, BaseMessages.getString( + PKG, "AppendMeta.InfoStream.FirstStream.Description"), StreamIcon.INFO, null)); + ioMeta.addStream(new Stream(StreamType.INFO, null, BaseMessages.getString( + PKG, "AppendMeta.InfoStream.SecondStream.Description"), StreamIcon.INFO, null)); + } + + return ioMeta; + } + + @Override + public void resetStepIoMeta() { + } + + public TransformationType[] getSupportedTransformationTypes() { + return new TransformationType[]{TransformationType.Normal,}; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java index 09189b5..509e401 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java @@ -22,9 +22,6 @@ package org.pentaho.di.trans.steps.filterrows; -import java.util.ArrayList; -import java.util.List; - import org.pentaho.di.core.CheckResult; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Condition; @@ -43,13 +40,7 @@ import org.pentaho.di.repository.Repository; import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; -import org.pentaho.di.trans.step.BaseStepMeta; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepIOMeta; -import org.pentaho.di.trans.step.StepIOMetaInterface; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.errorhandling.Stream; import org.pentaho.di.trans.step.errorhandling.StreamIcon; import 
org.pentaho.di.trans.step.errorhandling.StreamInterface; @@ -57,370 +48,372 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.util.ArrayList; +import java.util.List; + /* * Created on 02-jun-2003 * */ public class FilterRowsMeta extends BaseStepMeta implements StepMetaInterface { - private static Class PKG = FilterRowsMeta.class; // for i18n purposes, needed by Translator2!! - - /** - * This is the main condition for the complete filter. - * - * @since version 2.1 - */ - private Condition condition; - - public FilterRowsMeta() { - super(); // allocate BaseStepMeta - condition = new Condition(); - } - - public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { - readData( stepnode ); - } - - /** - * @return Returns the condition. - */ - public Condition getCondition() { - return condition; - } - - /** - * @param condition - * The condition to set. - */ - public void setCondition( Condition condition ) { - this.condition = condition; - } - - public void allocate() { - condition = new Condition(); - } - - public Object clone() { - FilterRowsMeta retval = (FilterRowsMeta) super.clone(); - - if ( condition != null ) { - retval.condition = (Condition) condition.clone(); - } else { - retval.condition = null; + private static Class PKG = FilterRowsMeta.class; // for i18n purposes, needed by Translator2!! + + /** + * This is the main condition for the complete filter. + * + * @since version 2.1 + */ + private Condition condition; + + public FilterRowsMeta() { + super(); // allocate BaseStepMeta + condition = new Condition(); } - return retval; - } + public void loadXML(Node stepnode, List databases, IMetaStore metaStore) throws KettleXMLException { + readData(stepnode); + } - public String getXML() throws KettleException { - StringBuffer retval = new StringBuffer( 200 ); + /** + * @return Returns the condition. + */ + public Condition getCondition() { + return condition; + } - List targetStreams = getStepIOMeta().getTargetStreams(); - retval.append( XMLHandler.addTagValue( "send_true_to", targetStreams.get( 0 ).getStepname() ) ); - retval.append( XMLHandler.addTagValue( "send_false_to", targetStreams.get( 1 ).getStepname() ) ); - retval.append( " " ).append( Const.CR ); + /** + * @param condition The condition to set. 
+ */ + public void setCondition(Condition condition) { + this.condition = condition; + } - if ( condition != null ) { - retval.append( condition.getXML() ); + public void allocate() { + condition = new Condition(); } - retval.append( " " ).append( Const.CR ); + public Object clone() { + FilterRowsMeta retval = (FilterRowsMeta) super.clone(); + + if (condition != null) { + retval.condition = (Condition) condition.clone(); + } else { + retval.condition = null; + } - return retval.toString(); - } + return retval; + } - private void readData( Node stepnode ) throws KettleXMLException { - try { - List targetStreams = getStepIOMeta().getTargetStreams(); + public String getXML() throws KettleException { + StringBuffer retval = new StringBuffer(200); - targetStreams.get( 0 ).setSubject( XMLHandler.getTagValue( stepnode, "send_true_to" ) ); - targetStreams.get( 1 ).setSubject( XMLHandler.getTagValue( stepnode, "send_false_to" ) ); + List targetStreams = getStepIOMeta().getTargetStreams(); + retval.append(XMLHandler.addTagValue("send_true_to", targetStreams.get(0).getStepname())); + retval.append(XMLHandler.addTagValue("send_false_to", targetStreams.get(1).getStepname())); + retval.append(" ").append(Const.CR); - Node compare = XMLHandler.getSubNode( stepnode, "compare" ); - Node condnode = XMLHandler.getSubNode( compare, "condition" ); + if (condition != null) { + retval.append(condition.getXML()); + } - // The new situation... - if ( condnode != null ) { - condition = new Condition( condnode ); - } else { - // Old style condition: Line1 OR Line2 OR Line3: @deprecated! - condition = new Condition(); + retval.append(" ").append(Const.CR); - int nrkeys = XMLHandler.countNodes( compare, "key" ); - if ( nrkeys == 1 ) { - Node knode = XMLHandler.getSubNodeByNr( compare, "key", 0 ); + return retval.toString(); + } - String key = XMLHandler.getTagValue( knode, "name" ); - String value = XMLHandler.getTagValue( knode, "value" ); - String field = XMLHandler.getTagValue( knode, "field" ); - String comparator = XMLHandler.getTagValue( knode, "condition" ); + private void readData(Node stepnode) throws KettleXMLException { + try { + List targetStreams = getStepIOMeta().getTargetStreams(); - condition.setOperator( Condition.OPERATOR_NONE ); - condition.setLeftValuename( key ); - condition.setFunction( Condition.getFunction( comparator ) ); - condition.setRightValuename( field ); - condition.setRightExact( new ValueMetaAndData( "value", value ) ); - } else { - for ( int i = 0; i < nrkeys; i++ ) { - Node knode = XMLHandler.getSubNodeByNr( compare, "key", i ); + targetStreams.get(0).setSubject(XMLHandler.getTagValue(stepnode, "send_true_to")); + targetStreams.get(1).setSubject(XMLHandler.getTagValue(stepnode, "send_false_to")); - String key = XMLHandler.getTagValue( knode, "name" ); - String value = XMLHandler.getTagValue( knode, "value" ); - String field = XMLHandler.getTagValue( knode, "field" ); - String comparator = XMLHandler.getTagValue( knode, "condition" ); + Node compare = XMLHandler.getSubNode(stepnode, "compare"); + Node condnode = XMLHandler.getSubNode(compare, "condition"); - Condition subc = new Condition(); - if ( i > 0 ) { - subc.setOperator( Condition.OPERATOR_OR ); + // The new situation... + if (condnode != null) { + condition = new Condition(condnode); } else { - subc.setOperator( Condition.OPERATOR_NONE ); + // Old style condition: Line1 OR Line2 OR Line3: @deprecated! 
+ condition = new Condition(); + + int nrkeys = XMLHandler.countNodes(compare, "key"); + if (nrkeys == 1) { + Node knode = XMLHandler.getSubNodeByNr(compare, "key", 0); + + String key = XMLHandler.getTagValue(knode, "name"); + String value = XMLHandler.getTagValue(knode, "value"); + String field = XMLHandler.getTagValue(knode, "field"); + String comparator = XMLHandler.getTagValue(knode, "condition"); + + condition.setOperator(Condition.OPERATOR_NONE); + condition.setLeftValuename(key); + condition.setFunction(Condition.getFunction(comparator)); + condition.setRightValuename(field); + condition.setRightExact(new ValueMetaAndData("value", value)); + } else { + for (int i = 0; i < nrkeys; i++) { + Node knode = XMLHandler.getSubNodeByNr(compare, "key", i); + + String key = XMLHandler.getTagValue(knode, "name"); + String value = XMLHandler.getTagValue(knode, "value"); + String field = XMLHandler.getTagValue(knode, "field"); + String comparator = XMLHandler.getTagValue(knode, "condition"); + + Condition subc = new Condition(); + if (i > 0) { + subc.setOperator(Condition.OPERATOR_OR); + } else { + subc.setOperator(Condition.OPERATOR_NONE); + } + subc.setLeftValuename(key); + subc.setFunction(Condition.getFunction(comparator)); + subc.setRightValuename(field); + subc.setRightExact(new ValueMetaAndData("value", value)); + + condition.addCondition(subc); + } + } } - subc.setLeftValuename( key ); - subc.setFunction( Condition.getFunction( comparator ) ); - subc.setRightValuename( field ); - subc.setRightExact( new ValueMetaAndData( "value", value ) ); - - condition.addCondition( subc ); - } + } catch (Exception e) { + throw new KettleXMLException(BaseMessages.getString( + PKG, "FilterRowsMeta.Exception..UnableToLoadStepInfoFromXML"), e); } - } - } catch ( Exception e ) { - throw new KettleXMLException( BaseMessages.getString( - PKG, "FilterRowsMeta.Exception..UnableToLoadStepInfoFromXML" ), e ); } - } - public void setDefault() { - allocate(); - } + public void setDefault() { + allocate(); + } - public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { - try { - allocate(); + public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List databases) throws KettleException { + try { + allocate(); - List targetStreams = getStepIOMeta().getTargetStreams(); + List targetStreams = getStepIOMeta().getTargetStreams(); - targetStreams.get( 0 ).setSubject( rep.getStepAttributeString( id_step, "send_true_to" ) ); - targetStreams.get( 1 ).setSubject( rep.getStepAttributeString( id_step, "send_false_to" ) ); + targetStreams.get(0).setSubject(rep.getStepAttributeString(id_step, "send_true_to")); + targetStreams.get(1).setSubject(rep.getStepAttributeString(id_step, "send_false_to")); - condition = rep.loadConditionFromStepAttribute( id_step, "id_condition" ); + condition = rep.loadConditionFromStepAttribute(id_step, "id_condition"); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( - PKG, "FilterRowsMeta.Exception.UnexpectedErrorInReadingStepInfoFromRepository" ), e ); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString( + PKG, "FilterRowsMeta.Exception.UnexpectedErrorInReadingStepInfoFromRepository"), e); + } } - } - @Override - public void searchInfoAndTargetSteps( List steps ) { - for ( StreamInterface stream : getStepIOMeta().getTargetStreams() ) { - stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + @Override + public void 
searchInfoAndTargetSteps(List steps) { + for (StreamInterface stream : getStepIOMeta().getTargetStreams()) { + stream.setStepMeta(StepMeta.findStep(steps, (String) stream.getSubject())); + } } - } - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { - try { - if ( condition != null ) { - List targetStreams = getStepIOMeta().getTargetStreams(); + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { + try { + if (condition != null) { + List targetStreams = getStepIOMeta().getTargetStreams(); - rep.saveConditionStepAttribute( id_transformation, id_step, "id_condition", condition ); - rep.saveStepAttribute( id_transformation, id_step, "send_true_to", targetStreams.get( 0 ).getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "send_false_to", targetStreams.get( 1 ).getStepname() ); - } - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( - PKG, "FilterRowsMeta.Exception.UnableToSaveStepInfoToRepository" ) - + id_step, e ); - } - } - - public void getFields( RowMetaInterface rowMeta, String origin, RowMetaInterface[] info, StepMeta nextStep, - VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { - // Clear the sortedDescending flag on fields used within the condition - otherwise the comparisons will be - // inverted!! - String[] conditionField = condition.getUsedFields(); - for ( int i = 0; i < conditionField.length; i++ ) { - int idx = rowMeta.indexOfValue( conditionField[i] ); - if ( idx >= 0 ) { - ValueMetaInterface valueMeta = rowMeta.getValueMeta( idx ); - valueMeta.setSortedDescending( false ); - } - } - } - - public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - CheckResult cr; - String error_message = ""; - - List targetStreams = getStepIOMeta().getTargetStreams(); - - if ( targetStreams.get( 0 ).getStepname() != null ) { - int trueTargetIdx = Const.indexOfString( targetStreams.get( 0 ).getStepname(), output ); - if ( trueTargetIdx < 0 ) { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "FilterRowsMeta.CheckResult.TargetStepInvalid", "true", targetStreams - .get( 0 ).getStepname() ), stepMeta ); - remarks.add( cr ); - } + rep.saveConditionStepAttribute(id_transformation, id_step, "id_condition", condition); + rep.saveStepAttribute(id_transformation, id_step, "send_true_to", targetStreams.get(0).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "send_false_to", targetStreams.get(1).getStepname()); + } + } catch (Exception e) { + throw new KettleException(BaseMessages.getString( + PKG, "FilterRowsMeta.Exception.UnableToSaveStepInfoToRepository") + + id_step, e); + } } - if ( targetStreams.get( 1 ).getStepname() != null ) { - int falseTargetIdx = Const.indexOfString( targetStreams.get( 1 ).getStepname(), output ); - if ( falseTargetIdx < 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "FilterRowsMeta.CheckResult.TargetStepInvalid", "false", targetStreams - .get( 1 ).getStepname() ), stepMeta ); - remarks.add( cr ); - } + public void getFields(RowMetaInterface rowMeta, String origin, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository 
repository, IMetaStore metaStore) throws KettleStepException { + // Clear the sortedDescending flag on fields used within the condition - otherwise the comparisons will be + // inverted!! + String[] conditionField = condition.getUsedFields(); + for (int i = 0; i < conditionField.length; i++) { + int idx = rowMeta.indexOfValue(conditionField[i]); + if (idx >= 0) { + ValueMetaInterface valueMeta = rowMeta.getValueMeta(idx); + valueMeta.setSortedDescending(false); + } + } } - if ( condition.isEmpty() ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "FilterRowsMeta.CheckResult.NoConditionSpecified" ), stepMeta ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "FilterRowsMeta.CheckResult.ConditionSpecified" ), stepMeta ); - } - remarks.add( cr ); - - // Look up fields in the input stream - if ( prev != null && prev.size() > 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "FilterRowsMeta.CheckResult.StepReceivingFields", prev.size() + "" ), stepMeta ); - remarks.add( cr ); - - List orphanFields = getOrphanFields( condition, prev ); - if ( orphanFields.size() > 0 ) { - error_message = BaseMessages.getString( PKG, "FilterRowsMeta.CheckResult.FieldsNotFoundFromPreviousStep" ) - + Const.CR; - for ( String field : orphanFields ) { - error_message += "\t\t" + field + Const.CR; + public void check(List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore) { + CheckResult cr; + String error_message = ""; + + List targetStreams = getStepIOMeta().getTargetStreams(); + + if (targetStreams.get(0).getStepname() != null) { + int trueTargetIdx = Const.indexOfString(targetStreams.get(0).getStepname(), output); + if (trueTargetIdx < 0) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.TargetStepInvalid", "true", targetStreams + .get(0).getStepname()), stepMeta); + remarks.add(cr); + } + } + + if (targetStreams.get(1).getStepname() != null) { + int falseTargetIdx = Const.indexOfString(targetStreams.get(1).getStepname(), output); + if (falseTargetIdx < 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "FilterRowsMeta.CheckResult.TargetStepInvalid", "false", targetStreams + .get(1).getStepname()), stepMeta); + remarks.add(cr); + } + } + + if (condition.isEmpty()) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.NoConditionSpecified"), stepMeta); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.ConditionSpecified"), stepMeta); + } + remarks.add(cr); + + // Look up fields in the input stream + if (prev != null && prev.size() > 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.StepReceivingFields", prev.size() + ""), stepMeta); + remarks.add(cr); + + List orphanFields = getOrphanFields(condition, prev); + if (orphanFields.size() > 0) { + error_message = BaseMessages.getString(PKG, "FilterRowsMeta.CheckResult.FieldsNotFoundFromPreviousStep") + + Const.CR; + for (String field : orphanFields) { + error_message += "\t\t" + field + Const.CR; + } + cr = new 
CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString(PKG, + "FilterRowsMeta.CheckResult.AllFieldsFoundInInputStream"), stepMeta); + } + remarks.add(cr); + } else { + error_message = + BaseMessages.getString(PKG, "FilterRowsMeta.CheckResult.CouldNotReadFieldsFromPreviousStep") + + Const.CR; + cr = new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta); + remarks.add(cr); + } + + // See if we have input streams leading to this step! + if (input.length > 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.StepReceivingInfoFromOtherSteps"), stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "FilterRowsMeta.CheckResult.NoInputReceivedFromOtherSteps"), stepMeta); + remarks.add(cr); } - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( PKG, - "FilterRowsMeta.CheckResult.AllFieldsFoundInInputStream" ), stepMeta ); - } - remarks.add( cr ); - } else { - error_message = - BaseMessages.getString( PKG, "FilterRowsMeta.CheckResult.CouldNotReadFieldsFromPreviousStep" ) - + Const.CR; - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta ); - remarks.add( cr ); } - // See if we have input streams leading to this step! - if ( input.length > 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "FilterRowsMeta.CheckResult.StepReceivingInfoFromOtherSteps" ), stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "FilterRowsMeta.CheckResult.NoInputReceivedFromOtherSteps" ), stepMeta ); - remarks.add( cr ); + public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans) { + return new FilterRows(stepMeta, stepDataInterface, cnr, tr, trans); } - } - public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, - Trans trans ) { - return new FilterRows( stepMeta, stepDataInterface, cnr, tr, trans ); - } + public StepDataInterface getStepData() { + return new FilterRowsData(); + } - public StepDataInterface getStepData() { - return new FilterRowsData(); - } + /** + * Returns the Input/Output metadata for this step. + */ + public StepIOMetaInterface getStepIOMeta() { + if (ioMeta == null) { - /** - * Returns the Input/Output metadata for this step. 
- */ - public StepIOMetaInterface getStepIOMeta() { - if ( ioMeta == null ) { + ioMeta = new StepIOMeta(true, true, false, false, false, false); - ioMeta = new StepIOMeta( true, true, false, false, false, false ); + ioMeta.addStream(new Stream(StreamType.TARGET, null, BaseMessages.getString( + PKG, "FilterRowsMeta.InfoStream.True.Description"), StreamIcon.TRUE, null)); + ioMeta.addStream(new Stream(StreamType.TARGET, null, BaseMessages.getString( + PKG, "FilterRowsMeta.InfoStream.False.Description"), StreamIcon.FALSE, null)); + } - ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( - PKG, "FilterRowsMeta.InfoStream.True.Description" ), StreamIcon.TRUE, null ) ); - ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( - PKG, "FilterRowsMeta.InfoStream.False.Description" ), StreamIcon.FALSE, null ) ); + return ioMeta; } - return ioMeta; - } - - @Override - public void resetStepIoMeta() { - } - - /** - * When an optional stream is selected, this method is called to handled the ETL metadata implications of that. - * - * @param stream - * The optional stream to handle. - */ - public void handleStreamSelection( StreamInterface stream ) { - // This step targets another step. - // Make sure that we don't specify the same step for true and false... - // If the user requests false, we blank out true and vice versa - // - List targets = getStepIOMeta().getTargetStreams(); - int index = targets.indexOf( stream ); - if ( index == 0 ) { - // True - // - StepMeta falseStep = targets.get( 1 ).getStepMeta(); - if ( falseStep != null && falseStep.equals( stream.getStepMeta() ) ) { - targets.get( 1 ).setStepMeta( null ); - } + @Override + public void resetStepIoMeta() { } - if ( index == 1 ) { - // False - // - StepMeta trueStep = targets.get( 0 ).getStepMeta(); - if ( trueStep != null && trueStep.equals( stream.getStepMeta() ) ) { - targets.get( 0 ).setStepMeta( null ); - } + + /** + * When an optional stream is selected, this method is called to handled the ETL metadata implications of that. + * + * @param stream The optional stream to handle. + */ + public void handleStreamSelection(StreamInterface stream) { + // This step targets another step. + // Make sure that we don't specify the same step for true and false... 
+ // If the user requests false, we blank out true and vice versa + // + List targets = getStepIOMeta().getTargetStreams(); + int index = targets.indexOf(stream); + if (index == 0) { + // True + // + StepMeta falseStep = targets.get(1).getStepMeta(); + if (falseStep != null && falseStep.equals(stream.getStepMeta())) { + targets.get(1).setStepMeta(null); + } + } + if (index == 1) { + // False + // + StepMeta trueStep = targets.get(0).getStepMeta(); + if (trueStep != null && trueStep.equals(stream.getStepMeta())) { + targets.get(0).setStepMeta(null); + } + } } - } - - @Override - public boolean excludeFromCopyDistributeVerification() { - return true; - } - - /** - * Get non-existing referenced input fields - * @param condition - * @param prev - * @return - */ - public List getOrphanFields( Condition condition, RowMetaInterface prev ) { - List orphans = new ArrayList( ); - if ( condition == null || prev == null ) { - return orphans; + + @Override + public boolean excludeFromCopyDistributeVerification() { + return true; } - String[] key = condition.getUsedFields(); - for ( int i = 0; i < key.length; i++ ) { - if ( Const.isEmpty( key[i] ) ) { - continue; - } - ValueMetaInterface v = prev.searchValueMeta( key[i] ); - if ( v == null ) { - orphans.add( key[i] ); - } + + /** + * Get non-existing referenced input fields + * + * @param condition + * @param prev + * @return + */ + public List getOrphanFields(Condition condition, RowMetaInterface prev) { + List orphans = new ArrayList(); + if (condition == null || prev == null) { + return orphans; + } + String[] key = condition.getUsedFields(); + for (int i = 0; i < key.length; i++) { + if (Const.isEmpty(key[i])) { + continue; + } + ValueMetaInterface v = prev.searchValueMeta(key[i]); + if (v == null) { + orphans.add(key[i]); + } + } + return orphans; } - return orphans; - } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java index c3b74e5..9e0f35e 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java @@ -22,8 +22,6 @@ package org.pentaho.di.trans.steps.javafilter; -import java.util.List; - import org.pentaho.di.core.CheckResult; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Const; @@ -38,13 +36,7 @@ import org.pentaho.di.repository.Repository; import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; -import org.pentaho.di.trans.step.BaseStepMeta; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepIOMeta; -import org.pentaho.di.trans.step.StepIOMetaInterface; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.errorhandling.Stream; import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; @@ -52,219 +44,223 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.util.List; + /** * Contains the meta-data for the java filter step: calculates conditions using Janino - * + *

* Created on 30-oct-2009 */ public class JavaFilterMeta extends BaseStepMeta implements StepMetaInterface { - private static Class PKG = JavaFilterMeta.class; // for i18n purposes, needed by Translator2!! - - /** The formula calculations to be performed */ - private String condition; + private static Class PKG = JavaFilterMeta.class; // for i18n purposes, needed by Translator2!! - public JavaFilterMeta() { - super(); // allocate BaseStepMeta - } + /** + * The formula calculations to be performed + */ + private String condition; - public String getCondition() { - return condition; - } + public JavaFilterMeta() { + super(); // allocate BaseStepMeta + } - public void setCondition( String condition ) { - this.condition = condition; - } + public String getCondition() { + return condition; + } - public void allocate( int nrCalcs ) { - } + public void setCondition(String condition) { + this.condition = condition; + } - public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { - List targetStreams = getStepIOMeta().getTargetStreams(); + public void allocate(int nrCalcs) { + } - targetStreams.get( 0 ).setSubject( XMLHandler.getTagValue( stepnode, "send_true_to" ) ); - targetStreams.get( 1 ).setSubject( XMLHandler.getTagValue( stepnode, "send_false_to" ) ); + public void loadXML(Node stepnode, List databases, IMetaStore metaStore) throws KettleXMLException { + List targetStreams = getStepIOMeta().getTargetStreams(); - condition = XMLHandler.getTagValue( stepnode, "condition" ); - } + targetStreams.get(0).setSubject(XMLHandler.getTagValue(stepnode, "send_true_to")); + targetStreams.get(1).setSubject(XMLHandler.getTagValue(stepnode, "send_false_to")); - public String getXML() { - StringBuffer retval = new StringBuffer(); + condition = XMLHandler.getTagValue(stepnode, "condition"); + } - List targetStreams = getStepIOMeta().getTargetStreams(); - retval.append( XMLHandler.addTagValue( "send_true_to", targetStreams.get( 0 ).getStepname() ) ); - retval.append( XMLHandler.addTagValue( "send_false_to", targetStreams.get( 1 ).getStepname() ) ); + public String getXML() { + StringBuffer retval = new StringBuffer(); - retval.append( XMLHandler.addTagValue( "condition", condition ) ); + List targetStreams = getStepIOMeta().getTargetStreams(); + retval.append(XMLHandler.addTagValue("send_true_to", targetStreams.get(0).getStepname())); + retval.append(XMLHandler.addTagValue("send_false_to", targetStreams.get(1).getStepname())); - return retval.toString(); - } + retval.append(XMLHandler.addTagValue("condition", condition)); - public boolean equals( Object obj ) { - if ( obj != null && ( obj.getClass().equals( this.getClass() ) ) ) { - JavaFilterMeta m = (JavaFilterMeta) obj; - return ( getXML() == m.getXML() ); + return retval.toString(); } - return false; - } + public boolean equals(Object obj) { + if (obj != null && (obj.getClass().equals(this.getClass()))) { + JavaFilterMeta m = (JavaFilterMeta) obj; + return (getXML() == m.getXML()); + } - public Object clone() { - JavaFilterMeta retval = (JavaFilterMeta) super.clone(); - return retval; - } + return false; + } - public void setDefault() { - condition = "true"; - } + public Object clone() { + JavaFilterMeta retval = (JavaFilterMeta) super.clone(); + return retval; + } - public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { - List targetStreams = getStepIOMeta().getTargetStreams(); + public void setDefault() { + condition = "true"; + } - 
targetStreams.get( 0 ).setSubject( rep.getStepAttributeString( id_step, "send_true_to" ) ); - targetStreams.get( 1 ).setSubject( rep.getStepAttributeString( id_step, "send_false_to" ) ); + public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List databases) throws KettleException { + List targetStreams = getStepIOMeta().getTargetStreams(); - condition = rep.getStepAttributeString( id_step, "condition" ); - } + targetStreams.get(0).setSubject(rep.getStepAttributeString(id_step, "send_true_to")); + targetStreams.get(1).setSubject(rep.getStepAttributeString(id_step, "send_false_to")); - @Override - public void searchInfoAndTargetSteps( List steps ) { - for ( StreamInterface stream : getStepIOMeta().getTargetStreams() ) { - stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); - } - } - - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { - List targetStreams = getStepIOMeta().getTargetStreams(); - - rep.saveStepAttribute( id_transformation, id_step, "send_true_to", targetStreams.get( 0 ).getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "send_false_to", targetStreams.get( 1 ).getStepname() ); - - rep.saveStepAttribute( id_transformation, id_step, "condition", condition ); - } - - public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - CheckResult cr; - String error_message = ""; - - List targetStreams = getStepIOMeta().getTargetStreams(); - - if ( targetStreams.get( 0 ).getStepname() != null && targetStreams.get( 1 ).getStepname() != null ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.BothTrueAndFalseStepSpecified" ), stepMeta ); - remarks.add( cr ); - } else if ( targetStreams.get( 0 ).getStepname() == null && targetStreams.get( 1 ).getStepname() == null ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.NeitherTrueAndFalseStepSpecified" ), stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.PlsSpecifyBothTrueAndFalseStep" ), stepMeta ); - remarks.add( cr ); + condition = rep.getStepAttributeString(id_step, "condition"); } - if ( targetStreams.get( 0 ).getStepname() != null ) { - int trueTargetIdx = Const.indexOfString( targetStreams.get( 0 ).getStepname(), output ); - if ( trueTargetIdx < 0 ) { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.TargetStepInvalid", "true", targetStreams - .get( 0 ).getStepname() ), stepMeta ); - remarks.add( cr ); - } + @Override + public void searchInfoAndTargetSteps(List steps) { + for (StreamInterface stream : getStepIOMeta().getTargetStreams()) { + stream.setStepMeta(StepMeta.findStep(steps, (String) stream.getSubject())); + } } - if ( targetStreams.get( 1 ).getStepname() != null ) { - int falseTargetIdx = Const.indexOfString( targetStreams.get( 1 ).getStepname(), output ); - if ( falseTargetIdx < 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages - .getString( PKG, "JavaFilterMeta.CheckResult.TargetStepInvalid", "false", targetStreams - .get( 1 ).getStepname() ), 
stepMeta ); - remarks.add( cr ); - } - } + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { + List targetStreams = getStepIOMeta().getTargetStreams(); + + rep.saveStepAttribute(id_transformation, id_step, "send_true_to", targetStreams.get(0).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "send_false_to", targetStreams.get(1).getStepname()); - if ( Const.isEmpty( condition ) ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.NoConditionSpecified" ), stepMeta ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.ConditionSpecified" ), stepMeta ); + rep.saveStepAttribute(id_transformation, id_step, "condition", condition); } - remarks.add( cr ); - - // Look up fields in the input stream - if ( prev != null && prev.size() > 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.StepReceivingFields", prev.size() + "" ), stepMeta ); - remarks.add( cr ); - - // What fields are used in the condition? - // TODO: verify condition, parse it - // - } else { - error_message = - BaseMessages.getString( PKG, "JavaFilterMeta.CheckResult.CouldNotReadFieldsFromPreviousStep" ) - + Const.CR; - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta ); - remarks.add( cr ); + + public void check(List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore) { + CheckResult cr; + String error_message = ""; + + List targetStreams = getStepIOMeta().getTargetStreams(); + + if (targetStreams.get(0).getStepname() != null && targetStreams.get(1).getStepname() != null) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.BothTrueAndFalseStepSpecified"), stepMeta); + remarks.add(cr); + } else if (targetStreams.get(0).getStepname() == null && targetStreams.get(1).getStepname() == null) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.NeitherTrueAndFalseStepSpecified"), stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.PlsSpecifyBothTrueAndFalseStep"), stepMeta); + remarks.add(cr); + } + + if (targetStreams.get(0).getStepname() != null) { + int trueTargetIdx = Const.indexOfString(targetStreams.get(0).getStepname(), output); + if (trueTargetIdx < 0) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.TargetStepInvalid", "true", targetStreams + .get(0).getStepname()), stepMeta); + remarks.add(cr); + } + } + + if (targetStreams.get(1).getStepname() != null) { + int falseTargetIdx = Const.indexOfString(targetStreams.get(1).getStepname(), output); + if (falseTargetIdx < 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages + .getString(PKG, "JavaFilterMeta.CheckResult.TargetStepInvalid", "false", targetStreams + .get(1).getStepname()), stepMeta); + remarks.add(cr); + } + } + + if (Const.isEmpty(condition)) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, 
BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.NoConditionSpecified"), stepMeta); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.ConditionSpecified"), stepMeta); + } + remarks.add(cr); + + // Look up fields in the input stream + if (prev != null && prev.size() > 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.StepReceivingFields", prev.size() + ""), stepMeta); + remarks.add(cr); + + // What fields are used in the condition? + // TODO: verify condition, parse it + // + } else { + error_message = + BaseMessages.getString(PKG, "JavaFilterMeta.CheckResult.CouldNotReadFieldsFromPreviousStep") + + Const.CR; + cr = new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, error_message, stepMeta); + remarks.add(cr); + } + + // See if we have input streams leading to this step! + if (input.length > 0) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.StepReceivingInfoFromOtherSteps"), stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "JavaFilterMeta.CheckResult.NoInputReceivedFromOtherSteps"), stepMeta); + remarks.add(cr); + } } - // See if we have input streams leading to this step! - if ( input.length > 0 ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.StepReceivingInfoFromOtherSteps" ), stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "JavaFilterMeta.CheckResult.NoInputReceivedFromOtherSteps" ), stepMeta ); - remarks.add( cr ); + public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans) { + return new JavaFilter(stepMeta, stepDataInterface, cnr, tr, trans); } - } - public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, - Trans trans ) { - return new JavaFilter( stepMeta, stepDataInterface, cnr, tr, trans ); - } + public StepDataInterface getStepData() { + return new JavaFilterData(); + } - public StepDataInterface getStepData() { - return new JavaFilterData(); - } + /** + * Returns the Input/Output metadata for this step. + */ + public StepIOMetaInterface getStepIOMeta() { + if (ioMeta == null) { - /** - * Returns the Input/Output metadata for this step. 
- */ - public StepIOMetaInterface getStepIOMeta() { - if ( ioMeta == null ) { + ioMeta = new StepIOMeta(true, true, false, false, false, false); - ioMeta = new StepIOMeta( true, true, false, false, false, false ); + ioMeta.addStream(new Stream(StreamType.TARGET, null, BaseMessages.getString( + PKG, "JavaFilterMeta.InfoStream.True.Description"), StreamIcon.TRUE, null)); + ioMeta.addStream(new Stream(StreamType.TARGET, null, BaseMessages.getString( + PKG, "JavaFilterMeta.InfoStream.False.Description"), StreamIcon.FALSE, null)); + } - ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( - PKG, "JavaFilterMeta.InfoStream.True.Description" ), StreamIcon.TRUE, null ) ); - ioMeta.addStream( new Stream( StreamType.TARGET, null, BaseMessages.getString( - PKG, "JavaFilterMeta.InfoStream.False.Description" ), StreamIcon.FALSE, null ) ); + return ioMeta; } - return ioMeta; - } - - @Override - public void resetStepIoMeta() { - } + @Override + public void resetStepIoMeta() { + } - @Override - public boolean excludeFromCopyDistributeVerification() { - return true; - } + @Override + public boolean excludeFromCopyDistributeVerification() { + return true; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java index 8d307f5..de6acf1 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java @@ -22,8 +22,6 @@ package org.pentaho.di.trans.steps.mergejoin; -import java.util.List; - import org.pentaho.di.core.CheckResult; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Const; @@ -41,13 +39,7 @@ import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; import org.pentaho.di.trans.TransMeta.TransformationType; -import org.pentaho.di.trans.step.BaseStepMeta; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepIOMeta; -import org.pentaho.di.trans.step.StepIOMetaInterface; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.errorhandling.Stream; import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; @@ -55,275 +47,277 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.util.List; + /* * @author Biswapesh * @since 24-nov-2006 */ public class MergeJoinMeta extends BaseStepMeta implements StepMetaInterface { - private static Class PKG = MergeJoinMeta.class; // for i18n purposes, needed by Translator2!! - - public static final String[] join_types = { "INNER", "LEFT OUTER", "RIGHT OUTER", "FULL OUTER" }; - public static final boolean[] one_optionals = { false, false, true, true }; - public static final boolean[] two_optionals = { false, true, false, true }; - - private String joinType; - - private String[] keyFields1; - private String[] keyFields2; - - /** - * The supported join types are INNER, LEFT OUTER, RIGHT OUTER and FULL OUTER - * - * @return The type of join - */ - public String getJoinType() { - return joinType; - } - - /** - * Sets the type of join - * - * @param joinType The type of join, e.g. 
INNER/FULL OUTER - */ - public void setJoinType( String joinType ) { - this.joinType = joinType; - } - - /** - * @return Returns the keyFields1. - */ - public String[] getKeyFields1() { - return keyFields1; - } - - /** - * @param keyFields1 The keyFields1 to set. - */ - public void setKeyFields1( String[] keyFields1 ) { - this.keyFields1 = keyFields1; - } - - /** - * @return Returns the keyFields2. - */ - public String[] getKeyFields2() { - return keyFields2; - } - - /** - * @param keyFields2 The keyFields2 to set. - */ - public void setKeyFields2( String[] keyFields2 ) { - this.keyFields2 = keyFields2; - } - - public boolean excludeFromRowLayoutVerification() { - return true; - } - - public MergeJoinMeta() { - super(); // allocate BaseStepMeta - } - - public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { - readData( stepnode ); - } - - public void allocate( int nrKeys1, int nrKeys2 ) { - keyFields1 = new String[nrKeys1]; - keyFields2 = new String[nrKeys2]; - } - - public Object clone() { - MergeJoinMeta retval = (MergeJoinMeta) super.clone(); - int nrKeys1 = keyFields1.length; - int nrKeys2 = keyFields2.length; - retval.allocate( nrKeys1, nrKeys2 ); - System.arraycopy( keyFields1, 0, retval.keyFields1, 0, nrKeys1 ); - System.arraycopy( keyFields2, 0, retval.keyFields2, 0, nrKeys2 ); - - return retval; - } - - public String getXML() { - StringBuffer retval = new StringBuffer(); - - List infoStreams = getStepIOMeta().getInfoStreams(); - - retval.append( XMLHandler.addTagValue( "join_type", getJoinType() ) ); - retval.append( XMLHandler.addTagValue( "step1", infoStreams.get( 0 ).getStepname() ) ); - retval.append( XMLHandler.addTagValue( "step2", infoStreams.get( 1 ).getStepname() ) ); - - retval.append( " " + Const.CR ); - for ( int i = 0; i < keyFields1.length; i++ ) { - retval.append( " " + XMLHandler.addTagValue( "key", keyFields1[i] ) ); + private static Class PKG = MergeJoinMeta.class; // for i18n purposes, needed by Translator2!! + + public static final String[] join_types = {"INNER", "LEFT OUTER", "RIGHT OUTER", "FULL OUTER"}; + public static final boolean[] one_optionals = {false, false, true, true}; + public static final boolean[] two_optionals = {false, true, false, true}; + + private String joinType; + + private String[] keyFields1; + private String[] keyFields2; + + /** + * The supported join types are INNER, LEFT OUTER, RIGHT OUTER and FULL OUTER + * + * @return The type of join + */ + public String getJoinType() { + return joinType; } - retval.append( " " + Const.CR ); - retval.append( " " + Const.CR ); - for ( int i = 0; i < keyFields2.length; i++ ) { - retval.append( " " + XMLHandler.addTagValue( "key", keyFields2[i] ) ); + /** + * Sets the type of join + * + * @param joinType The type of join, e.g. INNER/FULL OUTER + */ + public void setJoinType(String joinType) { + this.joinType = joinType; } - retval.append( " " + Const.CR ); - return retval.toString(); - } + /** + * @return Returns the keyFields1. + */ + public String[] getKeyFields1() { + return keyFields1; + } - private void readData( Node stepnode ) throws KettleXMLException { - try { + /** + * @param keyFields1 The keyFields1 to set. + */ + public void setKeyFields1(String[] keyFields1) { + this.keyFields1 = keyFields1; + } - Node keysNode1 = XMLHandler.getSubNode( stepnode, "keys_1" ); - Node keysNode2 = XMLHandler.getSubNode( stepnode, "keys_2" ); + /** + * @return Returns the keyFields2. 
+ */ + public String[] getKeyFields2() { + return keyFields2; + } - int nrKeys1 = XMLHandler.countNodes( keysNode1, "key" ); - int nrKeys2 = XMLHandler.countNodes( keysNode2, "key" ); + /** + * @param keyFields2 The keyFields2 to set. + */ + public void setKeyFields2(String[] keyFields2) { + this.keyFields2 = keyFields2; + } - allocate( nrKeys1, nrKeys2 ); + public boolean excludeFromRowLayoutVerification() { + return true; + } - for ( int i = 0; i < nrKeys1; i++ ) { - Node keynode = XMLHandler.getSubNodeByNr( keysNode1, "key", i ); - keyFields1[i] = XMLHandler.getNodeValue( keynode ); - } + public MergeJoinMeta() { + super(); // allocate BaseStepMeta + } - for ( int i = 0; i < nrKeys2; i++ ) { - Node keynode = XMLHandler.getSubNodeByNr( keysNode2, "key", i ); - keyFields2[i] = XMLHandler.getNodeValue( keynode ); - } + public void loadXML(Node stepnode, List databases, IMetaStore metaStore) throws KettleXMLException { + readData(stepnode); + } - List infoStreams = getStepIOMeta().getInfoStreams(); - infoStreams.get( 0 ).setSubject( XMLHandler.getTagValue( stepnode, "step1" ) ); - infoStreams.get( 1 ).setSubject( XMLHandler.getTagValue( stepnode, "step2" ) ); - joinType = XMLHandler.getTagValue( stepnode, "join_type" ); - } catch ( Exception e ) { - throw new KettleXMLException( - BaseMessages.getString( PKG, "MergeJoinMeta.Exception.UnableToLoadStepInfo" ), e ); + public void allocate(int nrKeys1, int nrKeys2) { + keyFields1 = new String[nrKeys1]; + keyFields2 = new String[nrKeys2]; } - } - - public void setDefault() { - joinType = join_types[0]; - allocate( 0, 0 ); - } - - public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { - try { - int nrKeys1 = rep.countNrStepAttributes( id_step, "keys_1" ); - int nrKeys2 = rep.countNrStepAttributes( id_step, "keys_2" ); - - allocate( nrKeys1, nrKeys2 ); - - for ( int i = 0; i < nrKeys1; i++ ) { - keyFields1[i] = rep.getStepAttributeString( id_step, i, "keys_1" ); - } - for ( int i = 0; i < nrKeys2; i++ ) { - keyFields2[i] = rep.getStepAttributeString( id_step, i, "keys_2" ); - } - - List infoStreams = getStepIOMeta().getInfoStreams(); - infoStreams.get( 0 ).setSubject( rep.getStepAttributeString( id_step, "step1" ) ); - infoStreams.get( 1 ).setSubject( rep.getStepAttributeString( id_step, "step2" ) ); - joinType = rep.getStepAttributeString( id_step, "join_type" ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( - PKG, "MergeJoinMeta.Exception.UnexpectedErrorReadingStepInfo" ), e ); + + public Object clone() { + MergeJoinMeta retval = (MergeJoinMeta) super.clone(); + int nrKeys1 = keyFields1.length; + int nrKeys2 = keyFields2.length; + retval.allocate(nrKeys1, nrKeys2); + System.arraycopy(keyFields1, 0, retval.keyFields1, 0, nrKeys1); + System.arraycopy(keyFields2, 0, retval.keyFields2, 0, nrKeys2); + + return retval; } - } - @Override - public void searchInfoAndTargetSteps( List steps ) { - for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { - stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + public String getXML() { + StringBuffer retval = new StringBuffer(); + + List infoStreams = getStepIOMeta().getInfoStreams(); + + retval.append(XMLHandler.addTagValue("join_type", getJoinType())); + retval.append(XMLHandler.addTagValue("step1", infoStreams.get(0).getStepname())); + retval.append(XMLHandler.addTagValue("step2", infoStreams.get(1).getStepname())); + + retval.append(" " + Const.CR); + for (int i = 0; i 
< keyFields1.length; i++) { + retval.append(" " + XMLHandler.addTagValue("key", keyFields1[i])); + } + retval.append(" " + Const.CR); + + retval.append(" " + Const.CR); + for (int i = 0; i < keyFields2.length; i++) { + retval.append(" " + XMLHandler.addTagValue("key", keyFields2[i])); + } + retval.append(" " + Const.CR); + + return retval.toString(); } - } - - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { - try { - for ( int i = 0; i < keyFields1.length; i++ ) { - rep.saveStepAttribute( id_transformation, id_step, i, "keys_1", keyFields1[i] ); - } - - for ( int i = 0; i < keyFields2.length; i++ ) { - rep.saveStepAttribute( id_transformation, id_step, i, "keys_2", keyFields2[i] ); - } - - List infoStreams = getStepIOMeta().getInfoStreams(); - - rep.saveStepAttribute( id_transformation, id_step, "step1", infoStreams.get( 0 ).getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "step2", infoStreams.get( 1 ).getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "join_type", getJoinType() ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, "MergeJoinMeta.Exception.UnableToSaveStepInfo" ) - + id_step, e ); + + private void readData(Node stepnode) throws KettleXMLException { + try { + + Node keysNode1 = XMLHandler.getSubNode(stepnode, "keys_1"); + Node keysNode2 = XMLHandler.getSubNode(stepnode, "keys_2"); + + int nrKeys1 = XMLHandler.countNodes(keysNode1, "key"); + int nrKeys2 = XMLHandler.countNodes(keysNode2, "key"); + + allocate(nrKeys1, nrKeys2); + + for (int i = 0; i < nrKeys1; i++) { + Node keynode = XMLHandler.getSubNodeByNr(keysNode1, "key", i); + keyFields1[i] = XMLHandler.getNodeValue(keynode); + } + + for (int i = 0; i < nrKeys2; i++) { + Node keynode = XMLHandler.getSubNodeByNr(keysNode2, "key", i); + keyFields2[i] = XMLHandler.getNodeValue(keynode); + } + + List infoStreams = getStepIOMeta().getInfoStreams(); + infoStreams.get(0).setSubject(XMLHandler.getTagValue(stepnode, "step1")); + infoStreams.get(1).setSubject(XMLHandler.getTagValue(stepnode, "step2")); + joinType = XMLHandler.getTagValue(stepnode, "join_type"); + } catch (Exception e) { + throw new KettleXMLException( + BaseMessages.getString(PKG, "MergeJoinMeta.Exception.UnableToLoadStepInfo"), e); + } + } + + public void setDefault() { + joinType = join_types[0]; + allocate(0, 0); + } + + public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List databases) throws KettleException { + try { + int nrKeys1 = rep.countNrStepAttributes(id_step, "keys_1"); + int nrKeys2 = rep.countNrStepAttributes(id_step, "keys_2"); + + allocate(nrKeys1, nrKeys2); + + for (int i = 0; i < nrKeys1; i++) { + keyFields1[i] = rep.getStepAttributeString(id_step, i, "keys_1"); + } + for (int i = 0; i < nrKeys2; i++) { + keyFields2[i] = rep.getStepAttributeString(id_step, i, "keys_2"); + } + + List infoStreams = getStepIOMeta().getInfoStreams(); + infoStreams.get(0).setSubject(rep.getStepAttributeString(id_step, "step1")); + infoStreams.get(1).setSubject(rep.getStepAttributeString(id_step, "step2")); + joinType = rep.getStepAttributeString(id_step, "join_type"); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString( + PKG, "MergeJoinMeta.Exception.UnexpectedErrorReadingStepInfo"), e); + } + } + + @Override + public void searchInfoAndTargetSteps(List steps) { + for (StreamInterface stream : getStepIOMeta().getInfoStreams()) { + 
stream.setStepMeta(StepMeta.findStep(steps, (String) stream.getSubject())); + } + } + + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { + try { + for (int i = 0; i < keyFields1.length; i++) { + rep.saveStepAttribute(id_transformation, id_step, i, "keys_1", keyFields1[i]); + } + + for (int i = 0; i < keyFields2.length; i++) { + rep.saveStepAttribute(id_transformation, id_step, i, "keys_2", keyFields2[i]); + } + + List infoStreams = getStepIOMeta().getInfoStreams(); + + rep.saveStepAttribute(id_transformation, id_step, "step1", infoStreams.get(0).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "step2", infoStreams.get(1).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "join_type", getJoinType()); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, "MergeJoinMeta.Exception.UnableToSaveStepInfo") + + id_step, e); + } } - } - public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, - Repository repository, IMetaStore metaStore ) { + public void check(List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore) { /* * @todo Need to check for the following: 1) Join type must be one of INNER / LEFT OUTER / RIGHT OUTER / FULL OUTER * 2) Number of input streams must be two (for now at least) 3) The field names of input streams must be unique */ - CheckResult cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages.getString( - PKG, "MergeJoinMeta.CheckResult.StepNotVerified" ), stepMeta ); - remarks.add( cr ); - } - - public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, - VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { - // We don't have any input fields here in "r" as they are all info fields. - // So we just merge in the info fields. - // - if ( info != null ) { - for ( int i = 0; i < info.length; i++ ) { - if ( info[i] != null ) { - r.mergeRowMeta( info[i], name ); + CheckResult cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_WARNING, BaseMessages.getString( + PKG, "MergeJoinMeta.CheckResult.StepNotVerified"), stepMeta); + remarks.add(cr); + } + + public void getFields(RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore) throws KettleStepException { + // We don't have any input fields here in "r" as they are all info fields. + // So we just merge in the info fields. 
+ // + if (info != null) { + for (int i = 0; i < info.length; i++) { + if (info[i] != null) { + r.mergeRowMeta(info[i], name); + } + } + } + + for (int i = 0; i < r.size(); i++) { + ValueMetaInterface vmi = r.getValueMeta(i); + if (vmi != null && Const.isEmpty(vmi.getName())) { + vmi.setOrigin(name); + } } - } + return; } - for ( int i = 0; i < r.size(); i++ ) { - ValueMetaInterface vmi = r.getValueMeta( i ); - if ( vmi != null && Const.isEmpty( vmi.getName() ) ) { - vmi.setOrigin( name ); - } + public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans) { + return new MergeJoin(stepMeta, stepDataInterface, cnr, tr, trans); } - return; - } - - public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, - Trans trans ) { - return new MergeJoin( stepMeta, stepDataInterface, cnr, tr, trans ); - } - - public StepDataInterface getStepData() { - return new MergeJoinData(); - } - - /** - * Returns the Input/Output metadata for this step. The generator step only produces output, does not accept input! - */ - public StepIOMetaInterface getStepIOMeta() { - if ( ioMeta == null ) { - - ioMeta = new StepIOMeta( true, true, false, false, false, false ); - - ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( - PKG, "MergeJoinMeta.InfoStream.FirstStream.Description" ), StreamIcon.INFO, null ) ); - ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( - PKG, "MergeJoinMeta.InfoStream.SecondStream.Description" ), StreamIcon.INFO, null ) ); + + public StepDataInterface getStepData() { + return new MergeJoinData(); } - return ioMeta; - } + /** + * Returns the Input/Output metadata for this step. The generator step only produces output, does not accept input! + */ + public StepIOMetaInterface getStepIOMeta() { + if (ioMeta == null) { + + ioMeta = new StepIOMeta(true, true, false, false, false, false); + + ioMeta.addStream(new Stream(StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeJoinMeta.InfoStream.FirstStream.Description"), StreamIcon.INFO, null)); + ioMeta.addStream(new Stream(StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeJoinMeta.InfoStream.SecondStream.Description"), StreamIcon.INFO, null)); + } - public void resetStepIoMeta() { - // Don't reset! - } + return ioMeta; + } + + public void resetStepIoMeta() { + // Don't reset! 
+ } - public TransformationType[] getSupportedTransformationTypes() { - return new TransformationType[]{ TransformationType.Normal, }; - } + public TransformationType[] getSupportedTransformationTypes() { + return new TransformationType[]{TransformationType.Normal,}; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java index aebf062..2d5dc5e 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java @@ -22,8 +22,6 @@ package org.pentaho.di.trans.steps.mergerows; -import java.util.List; - import org.pentaho.di.core.CheckResult; import org.pentaho.di.core.CheckResultInterface; import org.pentaho.di.core.Const; @@ -42,13 +40,7 @@ import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; import org.pentaho.di.trans.TransMeta.TransformationType; -import org.pentaho.di.trans.step.BaseStepMeta; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepIOMeta; -import org.pentaho.di.trans.step.StepIOMetaInterface; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.errorhandling.Stream; import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; @@ -56,303 +48,302 @@ import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; +import java.util.List; + /* * Created on 02-jun-2003 * */ public class MergeRowsMeta extends BaseStepMeta implements StepMetaInterface { - private static Class PKG = MergeRowsMeta.class; // for i18n purposes, needed by Translator2!! - - private String flagField; - - private String[] keyFields; - private String[] valueFields; - - /** - * @return Returns the keyFields. - */ - public String[] getKeyFields() { - return keyFields; - } - - /** - * @param keyFields - * The keyFields to set. - */ - public void setKeyFields( String[] keyFields ) { - this.keyFields = keyFields; - } - - /** - * @return Returns the valueFields. - */ - public String[] getValueFields() { - return valueFields; - } - - /** - * @param valueFields - * The valueFields to set. - */ - public void setValueFields( String[] valueFields ) { - this.valueFields = valueFields; - } - - public MergeRowsMeta() { - super(); // allocate BaseStepMeta - } - - public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { - readData( stepnode ); - } - - /** - * @return Returns the flagField. - */ - public String getFlagField() { - return flagField; - } - - /** - * @param flagField - * The flagField to set. 
- */ - public void setFlagField( String flagField ) { - this.flagField = flagField; - } - - public void allocate( int nrKeys, int nrValues ) { - keyFields = new String[nrKeys]; - valueFields = new String[nrValues]; - } - - public Object clone() { - MergeRowsMeta retval = (MergeRowsMeta) super.clone(); - int nrKeys = keyFields.length; - int nrValues = valueFields.length; - retval.allocate( nrKeys, nrValues ); - System.arraycopy( keyFields, 0, retval.keyFields, 0, nrKeys ); - System.arraycopy( valueFields, 0, retval.valueFields, 0, nrValues ); - return retval; - } - - public String getXML() { - StringBuffer retval = new StringBuffer(); - - retval.append( " " + Const.CR ); - for ( int i = 0; i < keyFields.length; i++ ) { - retval.append( " " + XMLHandler.addTagValue( "key", keyFields[i] ) ); + private static Class PKG = MergeRowsMeta.class; // for i18n purposes, needed by Translator2!! + + private String flagField; + + private String[] keyFields; + private String[] valueFields; + + /** + * @return Returns the keyFields. + */ + public String[] getKeyFields() { + return keyFields; } - retval.append( " " + Const.CR ); - retval.append( " " + Const.CR ); - for ( int i = 0; i < valueFields.length; i++ ) { - retval.append( " " + XMLHandler.addTagValue( "value", valueFields[i] ) ); + /** + * @param keyFields The keyFields to set. + */ + public void setKeyFields(String[] keyFields) { + this.keyFields = keyFields; } - retval.append( " " + Const.CR ); - retval.append( XMLHandler.addTagValue( "flag_field", flagField ) ); + /** + * @return Returns the valueFields. + */ + public String[] getValueFields() { + return valueFields; + } - List infoStreams = getStepIOMeta().getInfoStreams(); - retval.append( XMLHandler.addTagValue( "reference", infoStreams.get( 0 ).getStepname() ) ); - retval.append( XMLHandler.addTagValue( "compare", infoStreams.get( 1 ).getStepname() ) ); - retval.append( " " + Const.CR ); + /** + * @param valueFields The valueFields to set. + */ + public void setValueFields(String[] valueFields) { + this.valueFields = valueFields; + } - retval.append( " " + Const.CR ); + public MergeRowsMeta() { + super(); // allocate BaseStepMeta + } - return retval.toString(); - } + public void loadXML(Node stepnode, List databases, IMetaStore metaStore) throws KettleXMLException { + readData(stepnode); + } - private void readData( Node stepnode ) throws KettleXMLException { - try { + /** + * @return Returns the flagField. + */ + public String getFlagField() { + return flagField; + } - Node keysnode = XMLHandler.getSubNode( stepnode, "keys" ); - Node valuesnode = XMLHandler.getSubNode( stepnode, "values" ); + /** + * @param flagField The flagField to set. 
+ */ + public void setFlagField(String flagField) { + this.flagField = flagField; + } - int nrKeys = XMLHandler.countNodes( keysnode, "key" ); - int nrValues = XMLHandler.countNodes( valuesnode, "value" ); + public void allocate(int nrKeys, int nrValues) { + keyFields = new String[nrKeys]; + valueFields = new String[nrValues]; + } - allocate( nrKeys, nrValues ); + public Object clone() { + MergeRowsMeta retval = (MergeRowsMeta) super.clone(); + int nrKeys = keyFields.length; + int nrValues = valueFields.length; + retval.allocate(nrKeys, nrValues); + System.arraycopy(keyFields, 0, retval.keyFields, 0, nrKeys); + System.arraycopy(valueFields, 0, retval.valueFields, 0, nrValues); + return retval; + } + + public String getXML() { + StringBuffer retval = new StringBuffer(); - for ( int i = 0; i < nrKeys; i++ ) { - Node keynode = XMLHandler.getSubNodeByNr( keysnode, "key", i ); - keyFields[i] = XMLHandler.getNodeValue( keynode ); - } + retval.append(" " + Const.CR); + for (int i = 0; i < keyFields.length; i++) { + retval.append(" " + XMLHandler.addTagValue("key", keyFields[i])); + } + retval.append(" " + Const.CR); - for ( int i = 0; i < nrValues; i++ ) { - Node valuenode = XMLHandler.getSubNodeByNr( valuesnode, "value", i ); - valueFields[i] = XMLHandler.getNodeValue( valuenode ); - } + retval.append(" " + Const.CR); + for (int i = 0; i < valueFields.length; i++) { + retval.append(" " + XMLHandler.addTagValue("value", valueFields[i])); + } + retval.append(" " + Const.CR); - flagField = XMLHandler.getTagValue( stepnode, "flag_field" ); + retval.append(XMLHandler.addTagValue("flag_field", flagField)); - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface referenceStream = infoStreams.get( 0 ); - StreamInterface compareStream = infoStreams.get( 1 ); + List infoStreams = getStepIOMeta().getInfoStreams(); + retval.append(XMLHandler.addTagValue("reference", infoStreams.get(0).getStepname())); + retval.append(XMLHandler.addTagValue("compare", infoStreams.get(1).getStepname())); + retval.append(" " + Const.CR); - compareStream.setSubject( XMLHandler.getTagValue( stepnode, "compare" ) ); - referenceStream.setSubject( XMLHandler.getTagValue( stepnode, "reference" ) ); - } catch ( Exception e ) { - throw new KettleXMLException( - BaseMessages.getString( PKG, "MergeRowsMeta.Exception.UnableToLoadStepInfo" ), e ); + retval.append(" " + Const.CR); + + return retval.toString(); } - } - - public void setDefault() { - flagField = "flagfield"; - allocate( 0, 0 ); - } - - public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { - try { - int nrKeys = rep.countNrStepAttributes( id_step, "key_field" ); - int nrValues = rep.countNrStepAttributes( id_step, "value_field" ); - - allocate( nrKeys, nrValues ); - - for ( int i = 0; i < nrKeys; i++ ) { - keyFields[i] = rep.getStepAttributeString( id_step, i, "key_field" ); - } - for ( int i = 0; i < nrValues; i++ ) { - valueFields[i] = rep.getStepAttributeString( id_step, i, "value_field" ); - } - - flagField = rep.getStepAttributeString( id_step, "flag_field" ); - - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface referenceStream = infoStreams.get( 0 ); - StreamInterface compareStream = infoStreams.get( 1 ); - - referenceStream.setSubject( rep.getStepAttributeString( id_step, "reference" ) ); - compareStream.setSubject( rep.getStepAttributeString( id_step, "compare" ) ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( - PKG, 
"MergeRowsMeta.Exception.UnexpectedErrorReadingStepInfo" ), e ); + + private void readData(Node stepnode) throws KettleXMLException { + try { + + Node keysnode = XMLHandler.getSubNode(stepnode, "keys"); + Node valuesnode = XMLHandler.getSubNode(stepnode, "values"); + + int nrKeys = XMLHandler.countNodes(keysnode, "key"); + int nrValues = XMLHandler.countNodes(valuesnode, "value"); + + allocate(nrKeys, nrValues); + + for (int i = 0; i < nrKeys; i++) { + Node keynode = XMLHandler.getSubNodeByNr(keysnode, "key", i); + keyFields[i] = XMLHandler.getNodeValue(keynode); + } + + for (int i = 0; i < nrValues; i++) { + Node valuenode = XMLHandler.getSubNodeByNr(valuesnode, "value", i); + valueFields[i] = XMLHandler.getNodeValue(valuenode); + } + + flagField = XMLHandler.getTagValue(stepnode, "flag_field"); + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get(0); + StreamInterface compareStream = infoStreams.get(1); + + compareStream.setSubject(XMLHandler.getTagValue(stepnode, "compare")); + referenceStream.setSubject(XMLHandler.getTagValue(stepnode, "reference")); + } catch (Exception e) { + throw new KettleXMLException( + BaseMessages.getString(PKG, "MergeRowsMeta.Exception.UnableToLoadStepInfo"), e); + } } - } - @Override - public void searchInfoAndTargetSteps( List steps ) { - for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { - stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); + public void setDefault() { + flagField = "flagfield"; + allocate(0, 0); } - } - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { - try { - for ( int i = 0; i < keyFields.length; i++ ) { - rep.saveStepAttribute( id_transformation, id_step, i, "key_field", keyFields[i] ); - } + public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List databases) throws KettleException { + try { + int nrKeys = rep.countNrStepAttributes(id_step, "key_field"); + int nrValues = rep.countNrStepAttributes(id_step, "value_field"); - for ( int i = 0; i < valueFields.length; i++ ) { - rep.saveStepAttribute( id_transformation, id_step, i, "value_field", valueFields[i] ); - } + allocate(nrKeys, nrValues); - rep.saveStepAttribute( id_transformation, id_step, "flag_field", flagField ); + for (int i = 0; i < nrKeys; i++) { + keyFields[i] = rep.getStepAttributeString(id_step, i, "key_field"); + } + for (int i = 0; i < nrValues; i++) { + valueFields[i] = rep.getStepAttributeString(id_step, i, "value_field"); + } - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface referenceStream = infoStreams.get( 0 ); - StreamInterface compareStream = infoStreams.get( 1 ); + flagField = rep.getStepAttributeString(id_step, "flag_field"); - rep.saveStepAttribute( id_transformation, id_step, "reference", referenceStream.getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "compare", compareStream.getStepname() ); - } catch ( Exception e ) { - throw new KettleException( BaseMessages.getString( PKG, "MergeRowsMeta.Exception.UnableToSaveStepInfo" ) - + id_step, e ); + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get(0); + StreamInterface compareStream = infoStreams.get(1); + + referenceStream.setSubject(rep.getStepAttributeString(id_step, "reference")); + compareStream.setSubject(rep.getStepAttributeString(id_step, "compare")); + } catch (Exception e) { + throw new 
KettleException(BaseMessages.getString( + PKG, "MergeRowsMeta.Exception.UnexpectedErrorReadingStepInfo"), e); + } } - } - - public boolean chosesTargetSteps() { - return false; - } - - public String[] getTargetSteps() { - return null; - } - - public void getFields( RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, - VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { - // We don't have any input fields here in "r" as they are all info fields. - // So we just merge in the info fields. - // - if ( info != null ) { - boolean found = false; - for ( int i = 0; i < info.length && !found; i++ ) { - if ( info[i] != null ) { - r.mergeRowMeta( info[i] ); - found = true; + + @Override + public void searchInfoAndTargetSteps(List steps) { + for (StreamInterface stream : getStepIOMeta().getInfoStreams()) { + stream.setStepMeta(StepMeta.findStep(steps, (String) stream.getSubject())); } - } } - if ( Const.isEmpty( flagField ) ) { - throw new KettleStepException( BaseMessages.getString( PKG, "MergeRowsMeta.Exception.FlagFieldNotSpecified" ) ); + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { + try { + for (int i = 0; i < keyFields.length; i++) { + rep.saveStepAttribute(id_transformation, id_step, i, "key_field", keyFields[i]); + } + + for (int i = 0; i < valueFields.length; i++) { + rep.saveStepAttribute(id_transformation, id_step, i, "value_field", valueFields[i]); + } + + rep.saveStepAttribute(id_transformation, id_step, "flag_field", flagField); + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get(0); + StreamInterface compareStream = infoStreams.get(1); + + rep.saveStepAttribute(id_transformation, id_step, "reference", referenceStream.getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "compare", compareStream.getStepname()); + } catch (Exception e) { + throw new KettleException(BaseMessages.getString(PKG, "MergeRowsMeta.Exception.UnableToSaveStepInfo") + + id_step, e); + } } - ValueMetaInterface flagFieldValue = new ValueMeta( flagField, ValueMetaInterface.TYPE_STRING ); - flagFieldValue.setOrigin( name ); - r.addValueMeta( flagFieldValue ); - - } - - public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - CheckResult cr; - - List infoStreams = getStepIOMeta().getInfoStreams(); - StreamInterface referenceStream = infoStreams.get( 0 ); - StreamInterface compareStream = infoStreams.get( 1 ); - - if ( referenceStream.getStepname() != null && compareStream.getStepname() != null ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "MergeRowsMeta.CheckResult.SourceStepsOK" ), stepMeta ); - remarks.add( cr ); - } else if ( referenceStream.getStepname() == null && compareStream.getStepname() == null ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( - PKG, "MergeRowsMeta.CheckResult.SourceStepsMissing" ), stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( - PKG, "MergeRowsMeta.CheckResult.OneSourceStepMissing" ), stepMeta ); - remarks.add( cr ); + + public boolean chosesTargetSteps() { + return false; } - } - public StepInterface getStep( StepMeta 
stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, - Trans trans ) { - return new MergeRows( stepMeta, stepDataInterface, cnr, tr, trans ); - } + public String[] getTargetSteps() { + return null; + } - public StepDataInterface getStepData() { - return new MergeRowsData(); - } + public void getFields(RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore) throws KettleStepException { + // We don't have any input fields here in "r" as they are all info fields. + // So we just merge in the info fields. + // + if (info != null) { + boolean found = false; + for (int i = 0; i < info.length && !found; i++) { + if (info[i] != null) { + r.mergeRowMeta(info[i]); + found = true; + } + } + } - /** - * Returns the Input/Output metadata for this step. - */ - public StepIOMetaInterface getStepIOMeta() { - if ( ioMeta == null ) { + if (Const.isEmpty(flagField)) { + throw new KettleStepException(BaseMessages.getString(PKG, "MergeRowsMeta.Exception.FlagFieldNotSpecified")); + } + ValueMetaInterface flagFieldValue = new ValueMeta(flagField, ValueMetaInterface.TYPE_STRING); + flagFieldValue.setOrigin(name); + r.addValueMeta(flagFieldValue); - ioMeta = new StepIOMeta( true, true, false, false, false, false ); + } - ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( - PKG, "MergeRowsMeta.InfoStream.FirstStream.Description" ), StreamIcon.INFO, null ) ); - ioMeta.addStream( new Stream( StreamType.INFO, null, BaseMessages.getString( - PKG, "MergeRowsMeta.InfoStream.SecondStream.Description" ), StreamIcon.INFO, null ) ); + public void check(List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore) { + CheckResult cr; + + List infoStreams = getStepIOMeta().getInfoStreams(); + StreamInterface referenceStream = infoStreams.get(0); + StreamInterface compareStream = infoStreams.get(1); + + if (referenceStream.getStepname() != null && compareStream.getStepname() != null) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "MergeRowsMeta.CheckResult.SourceStepsOK"), stepMeta); + remarks.add(cr); + } else if (referenceStream.getStepname() == null && compareStream.getStepname() == null) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, BaseMessages.getString( + PKG, "MergeRowsMeta.CheckResult.SourceStepsMissing"), stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, BaseMessages.getString( + PKG, "MergeRowsMeta.CheckResult.OneSourceStepMissing"), stepMeta); + remarks.add(cr); + } } - return ioMeta; - } + public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, TransMeta tr, + Trans trans) { + return new MergeRows(stepMeta, stepDataInterface, cnr, tr, trans); + } - public void resetStepIoMeta() { - } + public StepDataInterface getStepData() { + return new MergeRowsData(); + } - public TransformationType[] getSupportedTransformationTypes() { - return new TransformationType[] { TransformationType.Normal, }; - } + /** + * Returns the Input/Output metadata for this step. 
+ */ + public StepIOMetaInterface getStepIOMeta() { + if (ioMeta == null) { + + ioMeta = new StepIOMeta(true, true, false, false, false, false); + + ioMeta.addStream(new Stream(StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeRowsMeta.InfoStream.FirstStream.Description"), StreamIcon.INFO, null)); + ioMeta.addStream(new Stream(StreamType.INFO, null, BaseMessages.getString( + PKG, "MergeRowsMeta.InfoStream.SecondStream.Description"), StreamIcon.INFO, null)); + } + + return ioMeta; + } + + public void resetStepIoMeta() { + } + + public TransformationType[] getSupportedTransformationTypes() { + return new TransformationType[]{TransformationType.Normal,}; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java index 8f5df7c..34e6bcb 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java @@ -27,11 +27,7 @@ import org.pentaho.di.core.Const; import org.pentaho.di.core.database.Database; import org.pentaho.di.core.database.DatabaseMeta; -import org.pentaho.di.core.exception.KettleDatabaseException; -import org.pentaho.di.core.exception.KettleException; -import org.pentaho.di.core.exception.KettlePluginException; -import org.pentaho.di.core.exception.KettleStepException; -import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.exception.*; import org.pentaho.di.core.row.RowDataUtil; import org.pentaho.di.core.row.RowMeta; import org.pentaho.di.core.row.RowMetaInterface; @@ -46,14 +42,7 @@ import org.pentaho.di.trans.DatabaseImpact; import org.pentaho.di.trans.Trans; import org.pentaho.di.trans.TransMeta; -import org.pentaho.di.trans.step.BaseStepMeta; -import org.pentaho.di.trans.step.StepDataInterface; -import org.pentaho.di.trans.step.StepIOMeta; -import org.pentaho.di.trans.step.StepIOMetaInterface; -import org.pentaho.di.trans.step.StepInjectionMetaEntry; -import org.pentaho.di.trans.step.StepInterface; -import org.pentaho.di.trans.step.StepMeta; -import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.*; import org.pentaho.di.trans.step.errorhandling.Stream; import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; @@ -68,505 +57,499 @@ * */ public class TableInputMeta extends BaseStepMeta implements StepMetaInterface { - private static Class PKG = TableInputMeta.class; // for i18n purposes, needed by Translator2!! - - private DatabaseMeta databaseMeta; - private String sql; - private String rowLimit; - - /** Should I execute once per row? */ - private boolean executeEachInputRow; - - private boolean variableReplacementActive; - - private boolean lazyConversionActive; - - public TableInputMeta() { - super(); - } - - /** - * @return Returns true if the step should be run per row - */ - public boolean isExecuteEachInputRow() { - return executeEachInputRow; - } - - /** - * @param oncePerRow - * true if the step should be run per row - */ - public void setExecuteEachInputRow( boolean oncePerRow ) { - this.executeEachInputRow = oncePerRow; - } - - /** - * @return Returns the database. - */ - public DatabaseMeta getDatabaseMeta() { - return databaseMeta; - } - - /** - * @param database - * The database to set. 
- */ - public void setDatabaseMeta( DatabaseMeta database ) { - this.databaseMeta = database; - } - - /** - * @return Returns the rowLimit. - */ - public String getRowLimit() { - return rowLimit; - } - - /** - * @param rowLimit - * The rowLimit to set. - */ - public void setRowLimit( String rowLimit ) { - this.rowLimit = rowLimit; - } - - /** - * @return Returns the sql. - */ - public String getSQL() { - return sql; - } - - /** - * @param sql - * The sql to set. - */ - public void setSQL( String sql ) { - this.sql = sql; - } - - public void loadXML( Node stepnode, List databases, IMetaStore metaStore ) throws KettleXMLException { - readData( stepnode, databases ); - } - - public Object clone() { - TableInputMeta retval = (TableInputMeta) super.clone(); - return retval; - } - - private void readData( Node stepnode, List databases ) throws KettleXMLException { - try { - databaseMeta = DatabaseMeta.findDatabase( databases, XMLHandler.getTagValue( stepnode, "connection" ) ); - sql = XMLHandler.getTagValue( stepnode, "sql" ); - rowLimit = XMLHandler.getTagValue( stepnode, "limit" ); - - String lookupFromStepname = XMLHandler.getTagValue( stepnode, "lookup" ); - StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); - infoStream.setSubject( lookupFromStepname ); - - executeEachInputRow = "Y".equals( XMLHandler.getTagValue( stepnode, "execute_each_row" ) ); - variableReplacementActive = "Y".equals( XMLHandler.getTagValue( stepnode, "variables_active" ) ); - lazyConversionActive = "Y".equals( XMLHandler.getTagValue( stepnode, "lazy_conversion_active" ) ); - } catch ( Exception e ) { - throw new KettleXMLException( "Unable to load step info from XML", e ); - } - } - - public void setDefault() { - databaseMeta = null; - sql = "SELECT FROM

WHERE "; - rowLimit = "0"; - } - - protected Database getDatabase() { - // Added for test purposes - return new Database( loggingObject, databaseMeta ); - } - - public void getFields( RowMetaInterface row, String origin, RowMetaInterface[] info, StepMeta nextStep, - VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException { - if ( databaseMeta == null ) { - return; // TODO: throw an exception here - } - - boolean param = false; - - Database db = getDatabase(); - databases = new Database[] { db }; // keep track of it for canceling purposes... - - // First try without connecting to the database... (can be S L O W) - String sNewSQL = sql; - if ( isVariableReplacementActive() ) { - sNewSQL = db.environmentSubstitute( sql ); - if ( space != null ) { - sNewSQL = space.environmentSubstitute( sNewSQL ); - } - } - - RowMetaInterface add = null; - try { - add = db.getQueryFields( sNewSQL, param ); - } catch ( KettleDatabaseException dbe ) { - throw new KettleStepException( "Unable to get queryfields for SQL: " + Const.CR + sNewSQL, dbe ); - } - - if ( add != null ) { - for ( int i = 0; i < add.size(); i++ ) { - ValueMetaInterface v = add.getValueMeta( i ); - v.setOrigin( origin ); - } - row.addRowMeta( add ); - } else { - try { - db.connect(); - - RowMetaInterface paramRowMeta = null; - Object[] paramData = null; - - StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); - if ( !Const.isEmpty( infoStream.getStepname() ) ) { - param = true; - if ( info.length >= 0 && info[0] != null ) { - paramRowMeta = info[0]; - paramData = RowDataUtil.allocateRowData( paramRowMeta.size() ); - } + private static Class PKG = TableInputMeta.class; // for i18n purposes, needed by Translator2!! + + private DatabaseMeta databaseMeta; + private String sql; + private String rowLimit; + + /** + * Should I execute once per row? + */ + private boolean executeEachInputRow; + + private boolean variableReplacementActive; + + private boolean lazyConversionActive; + + public TableInputMeta() { + super(); + } + + /** + * @return Returns true if the step should be run per row + */ + public boolean isExecuteEachInputRow() { + return executeEachInputRow; + } + + /** + * @param oncePerRow true if the step should be run per row + */ + public void setExecuteEachInputRow(boolean oncePerRow) { + this.executeEachInputRow = oncePerRow; + } + + /** + * @return Returns the database. + */ + public DatabaseMeta getDatabaseMeta() { + return databaseMeta; + } + + /** + * @param database The database to set. + */ + public void setDatabaseMeta(DatabaseMeta database) { + this.databaseMeta = database; + } + + /** + * @return Returns the rowLimit. + */ + public String getRowLimit() { + return rowLimit; + } + + /** + * @param rowLimit The rowLimit to set. + */ + public void setRowLimit(String rowLimit) { + this.rowLimit = rowLimit; + } + + /** + * @return Returns the sql. + */ + public String getSQL() { + return sql; + } + + /** + * @param sql The sql to set. 
+ */ + public void setSQL(String sql) { + this.sql = sql; + } + + public void loadXML(Node stepnode, List databases, IMetaStore metaStore) throws KettleXMLException { + readData(stepnode, databases); + } + + public Object clone() { + TableInputMeta retval = (TableInputMeta) super.clone(); + return retval; + } + + private void readData(Node stepnode, List databases) throws KettleXMLException { + try { + databaseMeta = DatabaseMeta.findDatabase(databases, XMLHandler.getTagValue(stepnode, "connection")); + sql = XMLHandler.getTagValue(stepnode, "sql"); + rowLimit = XMLHandler.getTagValue(stepnode, "limit"); + + String lookupFromStepname = XMLHandler.getTagValue(stepnode, "lookup"); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); + infoStream.setSubject(lookupFromStepname); + + executeEachInputRow = "Y".equals(XMLHandler.getTagValue(stepnode, "execute_each_row")); + variableReplacementActive = "Y".equals(XMLHandler.getTagValue(stepnode, "variables_active")); + lazyConversionActive = "Y".equals(XMLHandler.getTagValue(stepnode, "lazy_conversion_active")); + } catch (Exception e) { + throw new KettleXMLException("Unable to load step info from XML", e); } + } + + public void setDefault() { + databaseMeta = null; + sql = "SELECT FROM
WHERE "; + rowLimit = "0"; + } - add = db.getQueryFields( sNewSQL, param, paramRowMeta, paramData ); + protected Database getDatabase() { + // Added for test purposes + return new Database(loggingObject, databaseMeta); + } - if ( add == null ) { - return; + public void getFields(RowMetaInterface row, String origin, RowMetaInterface[] info, StepMeta nextStep, + VariableSpace space, Repository repository, IMetaStore metaStore) throws KettleStepException { + if (databaseMeta == null) { + return; // TODO: throw an exception here } - for ( int i = 0; i < add.size(); i++ ) { - ValueMetaInterface v = add.getValueMeta( i ); - v.setOrigin( origin ); + + boolean param = false; + + Database db = getDatabase(); + databases = new Database[]{db}; // keep track of it for canceling purposes... + + // First try without connecting to the database... (can be S L O W) + String sNewSQL = sql; + if (isVariableReplacementActive()) { + sNewSQL = db.environmentSubstitute(sql); + if (space != null) { + sNewSQL = space.environmentSubstitute(sNewSQL); + } } - row.addRowMeta( add ); - } catch ( KettleException ke ) { - throw new KettleStepException( "Unable to get queryfields for SQL: " + Const.CR + sNewSQL, ke ); - } finally { - db.disconnect(); - } - } - if ( isLazyConversionActive() ) { - for ( int i = 0; i < row.size(); i++ ) { - ValueMetaInterface v = row.getValueMeta( i ); + + RowMetaInterface add = null; try { - if ( v.getType() == ValueMetaInterface.TYPE_STRING ) { - ValueMetaInterface storageMeta = ValueMetaFactory.cloneValueMeta( v ); - storageMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL ); - v.setStorageMetadata( storageMeta ); - v.setStorageType( ValueMetaInterface.STORAGE_TYPE_BINARY_STRING ); - } - } catch ( KettlePluginException e ) { - throw new KettleStepException( "Unable to clone meta for lazy conversion: " + Const.CR + v, e ); + add = db.getQueryFields(sNewSQL, param); + } catch (KettleDatabaseException dbe) { + throw new KettleStepException("Unable to get queryfields for SQL: " + Const.CR + sNewSQL, dbe); } - } - } - } - - public String getXML() { - StringBuffer retval = new StringBuffer(); - - retval.append( " " - + XMLHandler.addTagValue( "connection", databaseMeta == null ? 
"" : databaseMeta.getName() ) ); - retval.append( " " + XMLHandler.addTagValue( "sql", sql ) ); - retval.append( " " + XMLHandler.addTagValue( "limit", rowLimit ) ); - StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); - retval.append( " " + XMLHandler.addTagValue( "lookup", infoStream.getStepname() ) ); - retval.append( " " + XMLHandler.addTagValue( "execute_each_row", executeEachInputRow ) ); - retval.append( " " + XMLHandler.addTagValue( "variables_active", variableReplacementActive ) ); - retval.append( " " + XMLHandler.addTagValue( "lazy_conversion_active", lazyConversionActive ) ); - - return retval.toString(); - } - - public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List databases ) throws KettleException { - try { - databaseMeta = rep.loadDatabaseMetaFromStepAttribute( id_step, "id_connection", databases ); - - sql = rep.getStepAttributeString( id_step, "sql" ); - rowLimit = rep.getStepAttributeString( id_step, "limit" ); - if ( rowLimit == null ) { - rowLimit = Long.toString( rep.getStepAttributeInteger( id_step, "limit" ) ); - } - - String lookupFromStepname = rep.getStepAttributeString( id_step, "lookup" ); - StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); - infoStream.setSubject( lookupFromStepname ); - - executeEachInputRow = rep.getStepAttributeBoolean( id_step, "execute_each_row" ); - variableReplacementActive = rep.getStepAttributeBoolean( id_step, "variables_active" ); - lazyConversionActive = rep.getStepAttributeBoolean( id_step, "lazy_conversion_active" ); - } catch ( Exception e ) { - throw new KettleException( "Unexpected error reading step information from the repository", e ); - } - } - - public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step ) throws KettleException { - try { - rep.saveDatabaseMetaStepAttribute( id_transformation, id_step, "id_connection", databaseMeta ); - rep.saveStepAttribute( id_transformation, id_step, "sql", sql ); - rep.saveStepAttribute( id_transformation, id_step, "limit", rowLimit ); - StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); - rep.saveStepAttribute( id_transformation, id_step, "lookup", infoStream.getStepname() ); - rep.saveStepAttribute( id_transformation, id_step, "execute_each_row", executeEachInputRow ); - rep.saveStepAttribute( id_transformation, id_step, "variables_active", variableReplacementActive ); - rep.saveStepAttribute( id_transformation, id_step, "lazy_conversion_active", lazyConversionActive ); - - // Also, save the step-database relationship! - if ( databaseMeta != null ) { - rep.insertStepDatabase( id_transformation, id_step, databaseMeta.getObjectId() ); - } - } catch ( Exception e ) { - throw new KettleException( "Unable to save step information to the repository for id_step=" + id_step, e ); - } - } - - public void check( List remarks, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, - Repository repository, IMetaStore metaStore ) { - CheckResult cr; - - if ( databaseMeta != null ) { - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "Connection exists", stepMeta ); - remarks.add( cr ); - - Database db = new Database( loggingObject, databaseMeta ); - db.shareVariablesWith( transMeta ); - databases = new Database[] { db }; // keep track of it for canceling purposes... 
- - try { - db.connect(); - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "Connection to database OK", stepMeta ); - remarks.add( cr ); - - if ( sql != null && sql.length() != 0 ) { - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "SQL statement is entered", stepMeta ); - remarks.add( cr ); + + if (add != null) { + for (int i = 0; i < add.size(); i++) { + ValueMetaInterface v = add.getValueMeta(i); + v.setOrigin(origin); + } + row.addRowMeta(add); } else { - cr = new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, "SQL statement is missing.", stepMeta ); - remarks.add( cr ); + try { + db.connect(); + + RowMetaInterface paramRowMeta = null; + Object[] paramData = null; + + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); + if (!Const.isEmpty(infoStream.getStepname())) { + param = true; + if (info.length >= 0 && info[0] != null) { + paramRowMeta = info[0]; + paramData = RowDataUtil.allocateRowData(paramRowMeta.size()); + } + } + + add = db.getQueryFields(sNewSQL, param, paramRowMeta, paramData); + + if (add == null) { + return; + } + for (int i = 0; i < add.size(); i++) { + ValueMetaInterface v = add.getValueMeta(i); + v.setOrigin(origin); + } + row.addRowMeta(add); + } catch (KettleException ke) { + throw new KettleStepException("Unable to get queryfields for SQL: " + Const.CR + sNewSQL, ke); + } finally { + db.disconnect(); + } } - } catch ( KettleException e ) { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, "An error occurred: " + e.getMessage(), stepMeta ); - remarks.add( cr ); - } finally { - db.disconnect(); - } - } else { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, "Please select or create a connection to use", stepMeta ); - remarks.add( cr ); - } - - // See if we have an informative step... - StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 ); - if ( !Const.isEmpty( infoStream.getStepname() ) ) { - boolean found = false; - for ( int i = 0; i < input.length; i++ ) { - if ( infoStream.getStepname().equalsIgnoreCase( input[i] ) ) { - found = true; + if (isLazyConversionActive()) { + for (int i = 0; i < row.size(); i++) { + ValueMetaInterface v = row.getValueMeta(i); + try { + if (v.getType() == ValueMetaInterface.TYPE_STRING) { + ValueMetaInterface storageMeta = ValueMetaFactory.cloneValueMeta(v); + storageMeta.setStorageType(ValueMetaInterface.STORAGE_TYPE_NORMAL); + v.setStorageMetadata(storageMeta); + v.setStorageType(ValueMetaInterface.STORAGE_TYPE_BINARY_STRING); + } + } catch (KettlePluginException e) { + throw new KettleStepException("Unable to clone meta for lazy conversion: " + Const.CR + v, e); + } + } } - } - if ( found ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "Previous step to read info from [" - + infoStream.getStepname() + "] is found.", stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_ERROR, "Previous step to read info from [" - + infoStream.getStepname() + "] is not found.", stepMeta ); - remarks.add( cr ); - } - - // Count the number of ? in the SQL string: - int count = 0; - for ( int i = 0; i < sql.length(); i++ ) { - char c = sql.charAt( i ); - if ( c == '\'' ) { // skip to next quote! - do { - i++; - c = sql.charAt( i ); - } while ( c != '\'' ); + } + + public String getXML() { + StringBuffer retval = new StringBuffer(); + + retval.append(" " + + XMLHandler.addTagValue("connection", databaseMeta == null ? 
"" : databaseMeta.getName())); + retval.append(" " + XMLHandler.addTagValue("sql", sql)); + retval.append(" " + XMLHandler.addTagValue("limit", rowLimit)); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); + retval.append(" " + XMLHandler.addTagValue("lookup", infoStream.getStepname())); + retval.append(" " + XMLHandler.addTagValue("execute_each_row", executeEachInputRow)); + retval.append(" " + XMLHandler.addTagValue("variables_active", variableReplacementActive)); + retval.append(" " + XMLHandler.addTagValue("lazy_conversion_active", lazyConversionActive)); + + return retval.toString(); + } + + public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List databases) throws KettleException { + try { + databaseMeta = rep.loadDatabaseMetaFromStepAttribute(id_step, "id_connection", databases); + + sql = rep.getStepAttributeString(id_step, "sql"); + rowLimit = rep.getStepAttributeString(id_step, "limit"); + if (rowLimit == null) { + rowLimit = Long.toString(rep.getStepAttributeInteger(id_step, "limit")); + } + + String lookupFromStepname = rep.getStepAttributeString(id_step, "lookup"); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); + infoStream.setSubject(lookupFromStepname); + + executeEachInputRow = rep.getStepAttributeBoolean(id_step, "execute_each_row"); + variableReplacementActive = rep.getStepAttributeBoolean(id_step, "variables_active"); + lazyConversionActive = rep.getStepAttributeBoolean(id_step, "lazy_conversion_active"); + } catch (Exception e) { + throw new KettleException("Unexpected error reading step information from the repository", e); } - if ( c == '?' ) { - count++; + } + + public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { + try { + rep.saveDatabaseMetaStepAttribute(id_transformation, id_step, "id_connection", databaseMeta); + rep.saveStepAttribute(id_transformation, id_step, "sql", sql); + rep.saveStepAttribute(id_transformation, id_step, "limit", rowLimit); + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); + rep.saveStepAttribute(id_transformation, id_step, "lookup", infoStream.getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "execute_each_row", executeEachInputRow); + rep.saveStepAttribute(id_transformation, id_step, "variables_active", variableReplacementActive); + rep.saveStepAttribute(id_transformation, id_step, "lazy_conversion_active", lazyConversionActive); + + // Also, save the step-database relationship! + if (databaseMeta != null) { + rep.insertStepDatabase(id_transformation, id_step, databaseMeta.getObjectId()); + } + } catch (Exception e) { + throw new KettleException("Unable to save step information to the repository for id_step=" + id_step, e); } - } - // Verify with the number of informative fields... 
- if ( info != null ) { - if ( count == info.size() ) { - cr = - new CheckResult( CheckResultInterface.TYPE_RESULT_OK, "This step is expecting and receiving " - + info.size() + " fields of input from the previous step.", stepMeta ); - remarks.add( cr ); + } + + public void check(List remarks, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, VariableSpace space, + Repository repository, IMetaStore metaStore) { + CheckResult cr; + + if (databaseMeta != null) { + cr = new CheckResult(CheckResultInterface.TYPE_RESULT_OK, "Connection exists", stepMeta); + remarks.add(cr); + + Database db = new Database(loggingObject, databaseMeta); + db.shareVariablesWith(transMeta); + databases = new Database[]{db}; // keep track of it for canceling purposes... + + try { + db.connect(); + cr = new CheckResult(CheckResultInterface.TYPE_RESULT_OK, "Connection to database OK", stepMeta); + remarks.add(cr); + + if (sql != null && sql.length() != 0) { + cr = new CheckResult(CheckResultInterface.TYPE_RESULT_OK, "SQL statement is entered", stepMeta); + remarks.add(cr); + } else { + cr = new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, "SQL statement is missing.", stepMeta); + remarks.add(cr); + } + } catch (KettleException e) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "An error occurred: " + e.getMessage(), stepMeta); + remarks.add(cr); + } finally { + db.disconnect(); + } + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "Please select or create a connection to use", stepMeta); + remarks.add(cr); + } + + // See if we have an informative step... + StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); + if (!Const.isEmpty(infoStream.getStepname())) { + boolean found = false; + for (int i = 0; i < input.length; i++) { + if (infoStream.getStepname().equalsIgnoreCase(input[i])) { + found = true; + } + } + if (found) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, "Previous step to read info from [" + + infoStream.getStepname() + "] is found.", stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_ERROR, "Previous step to read info from [" + + infoStream.getStepname() + "] is not found.", stepMeta); + remarks.add(cr); + } + + // Count the number of ? in the SQL string: + int count = 0; + for (int i = 0; i < sql.length(); i++) { + char c = sql.charAt(i); + if (c == '\'') { // skip to next quote! + do { + i++; + c = sql.charAt(i); + } while (c != '\''); + } + if (c == '?') { + count++; + } + } + // Verify with the number of informative fields... 
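The check() logic above counts '?' placeholders in the SQL, skipping anything inside single-quoted literals, and then compares that count with the number of fields arriving from the info step. A standalone sketch of just the counting loop (plain Java, with a bounds guard added for unterminated literals; the method name is illustrative):

public class PlaceholderCountSketch {

    // Counts '?' characters outside single-quoted string literals,
    // mirroring the loop in TableInputMeta.check().
    static int countPlaceholders(String sql) {
        int count = 0;
        for (int i = 0; i < sql.length(); i++) {
            char c = sql.charAt(i);
            if (c == '\'') {
                // Skip ahead to the closing quote.
                do {
                    i++;
                } while (i < sql.length() && sql.charAt(i) != '\'');
            } else if (c == '?') {
                count++;
            }
        }
        return count;
    }

    public static void main(String[] args) {
        // The '?' inside the literal 'a?b' must not be counted; only the two parameters are.
        String sql = "SELECT * FROM t WHERE name = 'a?b' AND id = ? AND grp = ?";
        System.out.println(countPlaceholders(sql)); // prints 2
    }
}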
+ if (info != null) { + if (count == info.size()) { + cr = + new CheckResult(CheckResultInterface.TYPE_RESULT_OK, "This step is expecting and receiving " + + info.size() + " fields of input from the previous step.", stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "This step is receiving " + + info.size() + " but not the expected " + count + + " fields of input from the previous step.", stepMeta); + remarks.add(cr); + } + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "Input step name is not recognized!", stepMeta); + remarks.add(cr); + } + } else { + if (input.length > 0) { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_ERROR, "Step is not expecting info from input steps.", stepMeta); + remarks.add(cr); + } else { + cr = + new CheckResult( + CheckResultInterface.TYPE_RESULT_OK, "No input expected, no input provided.", stepMeta); + remarks.add(cr); + } + + } + } + + /** + * @param steps optionally search the info step in a list of steps + */ + public void searchInfoAndTargetSteps(List steps) { + for (StreamInterface stream : getStepIOMeta().getInfoStreams()) { + stream.setStepMeta(StepMeta.findStep(steps, (String) stream.getSubject())); + } + } + + public StepInterface getStep(StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, + TransMeta transMeta, Trans trans) { + return new TableInput(stepMeta, stepDataInterface, cnr, transMeta, trans); + } + + public StepDataInterface getStepData() { + return new TableInputData(); + } + + @Override + public void analyseImpact(List impact, TransMeta transMeta, StepMeta stepMeta, + RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, Repository repository, + IMetaStore metaStore) throws KettleStepException { + + if (stepMeta.getName().equalsIgnoreCase("cdc_cust")) { + System.out.println("HERE!"); + } + + // Find the lookupfields... + RowMetaInterface out = new RowMeta(); + // TODO: this builds, but does it work in all cases. 
+ getFields(out, stepMeta.getName(), new RowMetaInterface[]{info}, null, transMeta, repository, metaStore); + + if (out != null) { + for (int i = 0; i < out.size(); i++) { + ValueMetaInterface outvalue = out.getValueMeta(i); + DatabaseImpact ii = + new DatabaseImpact( + DatabaseImpact.TYPE_IMPACT_READ, transMeta.getName(), stepMeta.getName(), databaseMeta + .getDatabaseName(), "", outvalue.getName(), outvalue.getName(), stepMeta.getName(), sql, + "read from one or more database tables via SQL statement"); + impact.add(ii); + + } + } + } + + public DatabaseMeta[] getUsedDatabaseConnections() { + if (databaseMeta != null) { + return new DatabaseMeta[]{databaseMeta}; } else { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, "This step is receiving " - + info.size() + " but not the expected " + count - + " fields of input from the previous step.", stepMeta ); - remarks.add( cr ); + return super.getUsedDatabaseConnections(); } - } else { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, "Input step name is not recognized!", stepMeta ); - remarks.add( cr ); - } - } else { - if ( input.length > 0 ) { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_ERROR, "Step is not expecting info from input steps.", stepMeta ); - remarks.add( cr ); - } else { - cr = - new CheckResult( - CheckResultInterface.TYPE_RESULT_OK, "No input expected, no input provided.", stepMeta ); - remarks.add( cr ); - } - - } - } - - /** - * @param steps - * optionally search the info step in a list of steps - */ - public void searchInfoAndTargetSteps( List steps ) { - for ( StreamInterface stream : getStepIOMeta().getInfoStreams() ) { - stream.setStepMeta( StepMeta.findStep( steps, (String) stream.getSubject() ) ); - } - } - - public StepInterface getStep( StepMeta stepMeta, StepDataInterface stepDataInterface, int cnr, - TransMeta transMeta, Trans trans ) { - return new TableInput( stepMeta, stepDataInterface, cnr, transMeta, trans ); - } - - public StepDataInterface getStepData() { - return new TableInputData(); - } - - @Override - public void analyseImpact( List impact, TransMeta transMeta, StepMeta stepMeta, - RowMetaInterface prev, String[] input, String[] output, RowMetaInterface info, Repository repository, - IMetaStore metaStore ) throws KettleStepException { - - if ( stepMeta.getName().equalsIgnoreCase( "cdc_cust" ) ) { - System.out.println( "HERE!" ); - } - - // Find the lookupfields... - RowMetaInterface out = new RowMeta(); - // TODO: this builds, but does it work in all cases. - getFields( out, stepMeta.getName(), new RowMetaInterface[] { info }, null, transMeta, repository, metaStore ); - - if ( out != null ) { - for ( int i = 0; i < out.size(); i++ ) { - ValueMetaInterface outvalue = out.getValueMeta( i ); - DatabaseImpact ii = - new DatabaseImpact( - DatabaseImpact.TYPE_IMPACT_READ, transMeta.getName(), stepMeta.getName(), databaseMeta - .getDatabaseName(), "", outvalue.getName(), outvalue.getName(), stepMeta.getName(), sql, - "read from one or more database tables via SQL statement" ); - impact.add( ii ); - - } - } - } - - public DatabaseMeta[] getUsedDatabaseConnections() { - if ( databaseMeta != null ) { - return new DatabaseMeta[] { databaseMeta }; - } else { - return super.getUsedDatabaseConnections(); - } - } - - /** - * @return Returns the variableReplacementActive. - */ - public boolean isVariableReplacementActive() { - return variableReplacementActive; - } - - /** - * @param variableReplacementActive - * The variableReplacementActive to set. 
- */ - public void setVariableReplacementActive( boolean variableReplacementActive ) { - this.variableReplacementActive = variableReplacementActive; - } - - /** - * @return the lazyConversionActive - */ - public boolean isLazyConversionActive() { - return lazyConversionActive; - } - - /** - * @param lazyConversionActive - * the lazyConversionActive to set - */ - public void setLazyConversionActive( boolean lazyConversionActive ) { - this.lazyConversionActive = lazyConversionActive; - } - - /** - * Returns the Input/Output metadata for this step. The generator step only produces output, does not accept input! - */ - public StepIOMetaInterface getStepIOMeta() { - if ( ioMeta == null ) { - - ioMeta = new StepIOMeta( true, true, false, false, false, false ); - - StreamInterface stream = - new Stream( - StreamType.INFO, null, BaseMessages.getString( PKG, "TableInputMeta.InfoStream.Description" ), - StreamIcon.INFO, null ); - ioMeta.addStream( stream ); - } - - return ioMeta; - } - - public void resetStepIoMeta() { - // Do nothing, don't reset as there is no need to do this. - } - - /** - * For compatibility, wraps around the standard step IO metadata - * - * @param stepMeta - * The step where you read lookup data from - */ - public void setLookupFromStep( StepMeta stepMeta ) { - getStepIOMeta().getInfoStreams().get( 0 ).setStepMeta( stepMeta ); - } - - /** - * For compatibility, wraps around the standard step IO metadata - * - * @return The step where you read lookup data from - */ - public StepMeta getLookupFromStep() { - return getStepIOMeta().getInfoStreams().get( 0 ).getStepMeta(); - } - - @Override - public TableInputMetaInjection getStepMetaInjectionInterface() { - return new TableInputMetaInjection( this ); - } - - public List extractStepMetadataEntries() throws KettleException { - return getStepMetaInjectionInterface().extractStepMetadataEntries(); - } + } + + /** + * @return Returns the variableReplacementActive. + */ + public boolean isVariableReplacementActive() { + return variableReplacementActive; + } + + /** + * @param variableReplacementActive The variableReplacementActive to set. + */ + public void setVariableReplacementActive(boolean variableReplacementActive) { + this.variableReplacementActive = variableReplacementActive; + } + + /** + * @return the lazyConversionActive + */ + public boolean isLazyConversionActive() { + return lazyConversionActive; + } + + /** + * @param lazyConversionActive the lazyConversionActive to set + */ + public void setLazyConversionActive(boolean lazyConversionActive) { + this.lazyConversionActive = lazyConversionActive; + } + + /** + * Returns the Input/Output metadata for this step. The generator step only produces output, does not accept input! + */ + public StepIOMetaInterface getStepIOMeta() { + if (ioMeta == null) { + + ioMeta = new StepIOMeta(true, true, false, false, false, false); + + StreamInterface stream = + new Stream( + StreamType.INFO, null, BaseMessages.getString(PKG, "TableInputMeta.InfoStream.Description"), + StreamIcon.INFO, null); + ioMeta.addStream(stream); + } + + return ioMeta; + } + + public void resetStepIoMeta() { + // Do nothing, don't reset as there is no need to do this. 
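For context on the lazy-conversion flag handled above: when it is active, getFields() keeps each string field as a binary string and attaches a cloned "storage" metadata describing its normal form, so charset decoding is deferred until a downstream step actually needs the value. A small sketch of that wiring, assuming the kettle-core jar is on the classpath and the Kettle environment has been initialized (ValueMetaString and the sketch class itself are illustrative):

import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.exception.KettlePluginException;
import org.pentaho.di.core.row.ValueMetaInterface;
import org.pentaho.di.core.row.value.ValueMetaFactory;
import org.pentaho.di.core.row.value.ValueMetaString;

public class LazyConversionSketch {

    // Marks a string field for lazy conversion: the field travels as a binary string,
    // while the cloned storage metadata records how to decode it later.
    static void applyLazyConversion(ValueMetaInterface v) throws KettlePluginException {
        if (v.getType() == ValueMetaInterface.TYPE_STRING) {
            ValueMetaInterface storageMeta = ValueMetaFactory.cloneValueMeta(v);
            storageMeta.setStorageType(ValueMetaInterface.STORAGE_TYPE_NORMAL);
            v.setStorageMetadata(storageMeta);
            v.setStorageType(ValueMetaInterface.STORAGE_TYPE_BINARY_STRING);
        }
    }

    public static void main(String[] args) throws Exception {
        KettleEnvironment.init(); // registers the value meta plugins used by the factory
        ValueMetaInterface v = new ValueMetaString("customer_name");
        applyLazyConversion(v);
        System.out.println(v.getName() + " storage type: " + v.getStorageType());
    }
}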
+ } + + /** + * For compatibility, wraps around the standard step IO metadata + * + * @param stepMeta The step where you read lookup data from + */ + public void setLookupFromStep(StepMeta stepMeta) { + getStepIOMeta().getInfoStreams().get(0).setStepMeta(stepMeta); + } + + /** + * For compatibility, wraps around the standard step IO metadata + * + * @return The step where you read lookup data from + */ + public StepMeta getLookupFromStep() { + return getStepIOMeta().getInfoStreams().get(0).getStepMeta(); + } + + @Override + public TableInputMetaInjection getStepMetaInjectionInterface() { + return new TableInputMetaInjection(this); + } + + public List extractStepMetadataEntries() throws KettleException { + return getStepMetaInjectionInterface().extractStepMetadataEntries(); + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java index beb051a..3e1cd74 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java @@ -22,20 +22,13 @@ package org.pentaho.di.www; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Properties; - -import org.apache.commons.cli.BasicParser; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.OptionBuilder; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; +import com.sun.jersey.api.client.Client; +import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.api.client.config.ClientConfig; +import com.sun.jersey.api.client.config.DefaultClientConfig; +import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; +import com.sun.jersey.api.json.JSONConfiguration; +import org.apache.commons.cli.*; import org.apache.commons.lang.StringUtils; import org.apache.commons.vfs2.FileObject; import org.pentaho.di.cluster.SlaveServer; @@ -52,340 +45,338 @@ import org.w3c.dom.Document; import org.w3c.dom.Node; -import com.sun.jersey.api.client.Client; -import com.sun.jersey.api.client.WebResource; -import com.sun.jersey.api.client.config.ClientConfig; -import com.sun.jersey.api.client.config.DefaultClientConfig; -import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; -import com.sun.jersey.api.json.JSONConfiguration; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Properties; public class Carte { - private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! 
- - private WebServer webServer; - private SlaveServerConfig config; - private boolean allOK; - private static Options options; - - public Carte( final SlaveServerConfig config ) throws Exception { - this( config, null ); - } - - public Carte( final SlaveServerConfig config, Boolean joinOverride ) throws Exception { - this.config = config; - - allOK = true; - - CarteSingleton.setSlaveServerConfig( config ); - LogChannelInterface log = CarteSingleton.getInstance().getLog(); - - final TransformationMap transformationMap = CarteSingleton.getInstance().getTransformationMap(); - transformationMap.setSlaveServerConfig( config ); - final JobMap jobMap = CarteSingleton.getInstance().getJobMap(); - jobMap.setSlaveServerConfig( config ); - List detections = Collections.synchronizedList( new ArrayList() ); - SocketRepository socketRepository = CarteSingleton.getInstance().getSocketRepository(); - - SlaveServer slaveServer = config.getSlaveServer(); - - String hostname = slaveServer.getHostname(); - int port = WebServer.PORT; - if ( !Const.isEmpty( slaveServer.getPort() ) ) { - try { - port = Integer.parseInt( slaveServer.getPort() ); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" + port ), - e ); - allOK = false; - } + private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! + + private WebServer webServer; + private SlaveServerConfig config; + private boolean allOK; + private static Options options; + + public Carte(final SlaveServerConfig config) throws Exception { + this(config, null); } - // TODO: see if we need to keep doing this on a periodic basis. - // The master might be dead or not alive yet at the time we send this message. - // Repeating the registration over and over every few minutes might harden this sort of problems. - // - Properties masterProperties = null; - if ( config.isReportingToMasters() ) { - String propertiesMaster = slaveServer.getPropertiesMasterName(); - for ( final SlaveServer master : config.getMasters() ) { - // Here we use the username/password specified in the slave server section of the configuration. - // This doesn't have to be the same pair as the one used on the master! + public Carte(final SlaveServerConfig config, Boolean joinOverride) throws Exception { + this.config = config; + + allOK = true; + + CarteSingleton.setSlaveServerConfig(config); + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + + final TransformationMap transformationMap = CarteSingleton.getInstance().getTransformationMap(); + transformationMap.setSlaveServerConfig(config); + final JobMap jobMap = CarteSingleton.getInstance().getJobMap(); + jobMap.setSlaveServerConfig(config); + List detections = Collections.synchronizedList(new ArrayList()); + SocketRepository socketRepository = CarteSingleton.getInstance().getSocketRepository(); + + SlaveServer slaveServer = config.getSlaveServer(); + + String hostname = slaveServer.getHostname(); + int port = WebServer.PORT; + if (!Const.isEmpty(slaveServer.getPort())) { + try { + port = Integer.parseInt(slaveServer.getPort()); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" + port), + e); + allOK = false; + } + } + + // TODO: see if we need to keep doing this on a periodic basis. + // The master might be dead or not alive yet at the time we send this message. 
+ // Repeating the registration over and over every few minutes might harden this sort of problems. // - try { - SlaveServerDetection slaveServerDetection = new SlaveServerDetection( slaveServer.getClient() ); - master.sendXML( slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/" ); - log.logBasic( "Registered this slave server to master slave server [" + master.toString() + "] on address [" - + master.getServerAndPort() + "]" ); - } catch ( Exception e ) { - log.logError( "Unable to register to master slave server [" + master.toString() + "] on address [" + master - .getServerAndPort() + "]" ); - allOK = false; + Properties masterProperties = null; + if (config.isReportingToMasters()) { + String propertiesMaster = slaveServer.getPropertiesMasterName(); + for (final SlaveServer master : config.getMasters()) { + // Here we use the username/password specified in the slave server section of the configuration. + // This doesn't have to be the same pair as the one used on the master! + // + try { + SlaveServerDetection slaveServerDetection = new SlaveServerDetection(slaveServer.getClient()); + master.sendXML(slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/"); + log.logBasic("Registered this slave server to master slave server [" + master.toString() + "] on address [" + + master.getServerAndPort() + "]"); + } catch (Exception e) { + log.logError("Unable to register to master slave server [" + master.toString() + "] on address [" + master + .getServerAndPort() + "]"); + allOK = false; + } + try { + if (!StringUtils.isBlank(propertiesMaster) && propertiesMaster.equalsIgnoreCase(master.getName())) { + if (masterProperties != null) { + log.logError("More than one primary master server. Master name is " + propertiesMaster); + } else { + masterProperties = master.getKettleProperties(); + log.logBasic("Got properties from master server [" + master.toString() + "], address [" + master + .getServerAndPort() + "]"); + } + } + } catch (Exception e) { + log.logError("Unable to get properties from master server [" + master.toString() + "], address [" + master + .getServerAndPort() + "]"); + allOK = false; + } + } } - try { - if ( !StringUtils.isBlank( propertiesMaster ) && propertiesMaster.equalsIgnoreCase( master.getName() ) ) { - if ( masterProperties != null ) { - log.logError( "More than one primary master server. 
Master name is " + propertiesMaster ); - } else { - masterProperties = master.getKettleProperties(); - log.logBasic( "Got properties from master server [" + master.toString() + "], address [" + master - .getServerAndPort() + "]" ); + if (masterProperties != null) { + EnvUtil.applyKettleProperties(masterProperties, slaveServer.isOverrideExistingProperties()); + } + + // If we need to time out finished or idle objects, we should create a timer in the background to clean + // this is done automatically now + // CarteSingleton.installPurgeTimer(config, log, transformationMap, jobMap); + + if (allOK) { + boolean shouldJoin = config.isJoining(); + if (joinOverride != null) { + shouldJoin = joinOverride; } - } - } catch ( Exception e ) { - log.logError( "Unable to get properties from master server [" + master.toString() + "], address [" + master - .getServerAndPort() + "]" ); - allOK = false; + + this.webServer = + new WebServer(log, transformationMap, jobMap, socketRepository, detections, hostname, port, shouldJoin, + config.getPasswordFile(), slaveServer.getSslConfig()); } - } } - if ( masterProperties != null ) { - EnvUtil.applyKettleProperties( masterProperties, slaveServer.isOverrideExistingProperties() ); + + public static void main(String[] args) { + try { + parseAndRunCommand(args); + } catch (Exception e) { + e.printStackTrace(); + } } - // If we need to time out finished or idle objects, we should create a timer in the background to clean - // this is done automatically now - // CarteSingleton.installPurgeTimer(config, log, transformationMap, jobMap); + @SuppressWarnings("static-access") + private static void parseAndRunCommand(String[] args) throws Exception { + options = new Options(); + options.addOption(OptionBuilder.withLongOpt("stop").withDescription(BaseMessages.getString(PKG, + "Carte.ParamDescription.stop")).hasArg(false).isRequired(false).create('s')); + options.addOption(OptionBuilder.withLongOpt("userName").withDescription(BaseMessages.getString(PKG, + "Carte.ParamDescription.userName")).hasArg(true).isRequired(false).create('u')); + options.addOption(OptionBuilder.withLongOpt("password").withDescription(BaseMessages.getString(PKG, + "Carte.ParamDescription.password")).hasArg(true).isRequired(false).create('p')); + options.addOption(OptionBuilder.withLongOpt("help").withDescription(BaseMessages.getString(PKG, + "Carte.ParamDescription.help")).create('h')); + + CommandLineParser parser = new BasicParser(); + CommandLine cmd = parser.parse(options, args); + + if (cmd.hasOption('h')) { + displayHelpAndAbort(); + } - if ( allOK ) { - boolean shouldJoin = config.isJoining(); - if ( joinOverride != null ) { - shouldJoin = joinOverride; - } + String[] arguments = cmd.getArgs(); + boolean usingConfigFile = false; - this.webServer = - new WebServer( log, transformationMap, jobMap, socketRepository, detections, hostname, port, shouldJoin, - config.getPasswordFile(), slaveServer.getSslConfig() ); - } - } + // Load from an xml file that describes the complete configuration... 
+ // + SlaveServerConfig config = null; + if (arguments.length == 1 && !Const.isEmpty(arguments[0])) { + if (cmd.hasOption('s')) { + throw new Carte.CarteCommandException(BaseMessages.getString(PKG, "Carte.Error.illegalStop")); + } + usingConfigFile = true; + FileObject file = KettleVFS.getFileObject(arguments[0]); + Document document = XMLHandler.loadXMLFile(file); + setKettleEnvironment(); // Must stand up server now to allow decryption of password + Node configNode = XMLHandler.getSubNode(document, SlaveServerConfig.XML_TAG); + config = new SlaveServerConfig(new LogChannel("Slave server config"), configNode); + if (config.getAutoSequence() != null) { + config.readAutoSequences(); + } + config.setFilename(arguments[0]); + } + if (arguments.length == 2 && !Const.isEmpty(arguments[0]) && !Const.isEmpty(arguments[1])) { + String hostname = arguments[0]; + String port = arguments[1]; + + if (cmd.hasOption('s')) { + String user = cmd.getOptionValue('u'); + String password = cmd.getOptionValue('p'); + shutdown(hostname, port, user, password); + System.exit(0); + } - public static void main( String[] args ) { - try { - parseAndRunCommand( args ); - } catch ( Exception e ) { - e.printStackTrace(); - } - } - - @SuppressWarnings( "static-access" ) - private static void parseAndRunCommand( String[] args ) throws Exception { - options = new Options(); - options.addOption( OptionBuilder.withLongOpt( "stop" ).withDescription( BaseMessages.getString( PKG, - "Carte.ParamDescription.stop" ) ).hasArg( false ).isRequired( false ).create( 's' ) ); - options.addOption( OptionBuilder.withLongOpt( "userName" ).withDescription( BaseMessages.getString( PKG, - "Carte.ParamDescription.userName" ) ).hasArg( true ).isRequired( false ).create( 'u' ) ); - options.addOption( OptionBuilder.withLongOpt( "password" ).withDescription( BaseMessages.getString( PKG, - "Carte.ParamDescription.password" ) ).hasArg( true ).isRequired( false ).create( 'p' ) ); - options.addOption( OptionBuilder.withLongOpt( "help" ).withDescription( BaseMessages.getString( PKG, - "Carte.ParamDescription.help" ) ).create( 'h' ) ); - - CommandLineParser parser = new BasicParser(); - CommandLine cmd = parser.parse( options, args ); - - if ( cmd.hasOption( 'h' ) ) { - displayHelpAndAbort(); + SlaveServer slaveServer = new SlaveServer(hostname + ":" + port, hostname, port, null, null); + + config = new SlaveServerConfig(); + config.setSlaveServer(slaveServer); + } + + // Nothing configured: show the usage + // + if (config == null) { + displayHelpAndAbort(); + } + + if (!usingConfigFile) { + setKettleEnvironment(); + } + runCarte(config); } - String[] arguments = cmd.getArgs(); - boolean usingConfigFile = false; - - // Load from an xml file that describes the complete configuration... 
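parseAndRunCommand() above supports two invocation forms: a single argument naming a carte-config XML file, or a hostname/port pair built into a SlaveServerConfig on the fly. The same host/port path can be driven programmatically; a sketch under the assumption that the kettle-engine jar and its dependencies are on the classpath (address and port are examples):

import org.pentaho.di.cluster.SlaveServer;
import org.pentaho.di.core.KettleClientEnvironment;
import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.www.Carte;
import org.pentaho.di.www.SlaveServerConfig;

public class EmbeddedCarteSketch {

    public static void main(String[] args) throws Exception {
        // Same environment setup Carte performs before starting the web server.
        KettleClientEnvironment.getInstance().setClient(KettleClientEnvironment.ClientType.CARTE);
        KettleEnvironment.init();

        // Equivalent of running "Carte 127.0.0.1 8082", but built in code instead of from XML.
        SlaveServer slaveServer = new SlaveServer("127.0.0.1:8082", "127.0.0.1", "8082", null, null);
        SlaveServerConfig config = new SlaveServerConfig();
        config.setSlaveServer(slaveServer);

        // Blocks until the embedded Jetty server is shut down,
        // e.g. by "Carte 127.0.0.1 8082 -s -u cluster -p cluster" hitting /kettle/stopCarte.
        Carte.runCarte(config);
    }
}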
- // - SlaveServerConfig config = null; - if ( arguments.length == 1 && !Const.isEmpty( arguments[0] ) ) { - if ( cmd.hasOption( 's' ) ) { - throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.illegalStop" ) ); - } - usingConfigFile = true; - FileObject file = KettleVFS.getFileObject( arguments[0] ); - Document document = XMLHandler.loadXMLFile( file ); - setKettleEnvironment(); // Must stand up server now to allow decryption of password - Node configNode = XMLHandler.getSubNode( document, SlaveServerConfig.XML_TAG ); - config = new SlaveServerConfig( new LogChannel( "Slave server config" ), configNode ); - if ( config.getAutoSequence() != null ) { - config.readAutoSequences(); - } - config.setFilename( arguments[0] ); + private static void setKettleEnvironment() throws Exception { + KettleClientEnvironment.getInstance().setClient(KettleClientEnvironment.ClientType.CARTE); + KettleEnvironment.init(); } - if ( arguments.length == 2 && !Const.isEmpty( arguments[0] ) && !Const.isEmpty( arguments[1] ) ) { - String hostname = arguments[0]; - String port = arguments[1]; - if ( cmd.hasOption( 's' ) ) { - String user = cmd.getOptionValue( 'u' ); - String password = cmd.getOptionValue( 'p' ); - shutdown( hostname, port, user, password ); - System.exit( 0 ); - } + public static void runCarte(SlaveServerConfig config) throws Exception { + KettleLogStore.init(config.getMaxLogLines(), config.getMaxLogTimeoutMinutes()); + + config.setJoining(true); - SlaveServer slaveServer = new SlaveServer( hostname + ":" + port, hostname, port, null, null ); + Carte carte = new Carte(config, false); + CarteSingleton.setCarte(carte); - config = new SlaveServerConfig(); - config.setSlaveServer( slaveServer ); + carte.getWebServer().join(); } - // Nothing configured: show the usage - // - if ( config == null ) { - displayHelpAndAbort(); + /** + * @return the webServer + */ + public WebServer getWebServer() { + return webServer; } - if ( !usingConfigFile ) { - setKettleEnvironment(); + /** + * @param webServer the webServer to set + */ + public void setWebServer(WebServer webServer) { + this.webServer = webServer; } - runCarte( config ); - } - - private static void setKettleEnvironment() throws Exception { - KettleClientEnvironment.getInstance().setClient( KettleClientEnvironment.ClientType.CARTE ); - KettleEnvironment.init(); - } - - public static void runCarte( SlaveServerConfig config ) throws Exception { - KettleLogStore.init( config.getMaxLogLines(), config.getMaxLogTimeoutMinutes() ); - - config.setJoining( true ); - - Carte carte = new Carte( config, false ); - CarteSingleton.setCarte( carte ); - - carte.getWebServer().join(); - } - - /** - * @return the webServer - */ - public WebServer getWebServer() { - return webServer; - } - - /** - * @param webServer - * the webServer to set - */ - public void setWebServer( WebServer webServer ) { - this.webServer = webServer; - } - - /** - * @return the slave server (Carte) configuration - */ - public SlaveServerConfig getConfig() { - return config; - } - - /** - * @param config - * the slave server (Carte) configuration - */ - public void setConfig( SlaveServerConfig config ) { - this.config = config; - } - - private static void displayHelpAndAbort() { - HelpFormatter formatter = new HelpFormatter(); - String optionsHelp = getOptionsHelpForUsage(); - String header = - BaseMessages.getString( PKG, "Carte.Usage.Text" ) + optionsHelp + "\nor\n" + BaseMessages.getString( PKG, - "Carte.Usage.Text2" ) + "\n\n" + BaseMessages.getString( PKG, 
"Carte.MainDescription" ); - - StringWriter stringWriter = new StringWriter(); - PrintWriter printWriter = new PrintWriter( stringWriter ); - formatter.printHelp( printWriter, 80, "CarteDummy", header, options, 5, 5, "", false ); - System.err.println( stripOff( stringWriter.toString(), "usage: CarteDummy" ) ); - - System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + ": Carte 127.0.0.1 8080" ); - System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + ": Carte 192.168.1.221 8081" ); - System.err.println(); - System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) + ": Carte /foo/bar/carte-config.xml" ); - System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) - + ": Carte http://www.example.com/carte-config.xml" ); - System.err.println( BaseMessages.getString( PKG, "Carte.Usage.Example" ) - + ": Carte 127.0.0.1 8080 -s -u cluster -p cluster" ); - - System.exit( 1 ); - } - - private static String getOptionsHelpForUsage() { - HelpFormatter formatter = new HelpFormatter(); - StringWriter stringWriter = new StringWriter(); - PrintWriter printWriter = new PrintWriter( stringWriter ); - formatter.printUsage( printWriter, 999, "", options ); - return stripOff( stringWriter.toString(), "usage: " ); // Strip off the "usage:" so it can be localized - } - - private static String stripOff( String target, String strip ) { - return target.substring( target.indexOf( strip ) + strip.length() ); - } - - private static void shutdown( String hostname, String port, String username, String password ) { - try { - callStopCarteRestService( hostname, port, username, password ); - } catch ( Exception e ) { - e.printStackTrace(); + + /** + * @return the slave server (Carte) configuration + */ + public SlaveServerConfig getConfig() { + return config; } - } - - /** - * Checks that Carte is running and if so, shuts down the Carte server - * - * @param hostname - * @param port - * @param username - * @param password - * @throws ParseException - * @throws CarteCommandException - */ - private static void callStopCarteRestService( String hostname, String port, String username, String password ) - throws ParseException, CarteCommandException { - // get information about the remote connection - try { - ClientConfig clientConfig = new DefaultClientConfig(); - clientConfig.getFeatures().put( JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE ); - Client client = Client.create( clientConfig ); - client.addFilter( new HTTPBasicAuthFilter( username, password ) ); - - // check if the user can access the carte server. 
Don't really need this call but may want to check it's output at - // some point - String contextURL = "http://" + hostname + ":" + port + "/kettle"; - WebResource resource = client.resource( contextURL + "/status/?xml=Y" ); - String response = resource.get( String.class ); - if ( response == null || !response.contains( "" ) ) { - throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.NoServerFound", hostname, "" - + port ) ); - } - - // This is the call that matters - resource = client.resource( contextURL + "/stopCarte" ); - response = resource.get( String.class ); - if ( response == null || !response.contains( "Shutting Down" ) ) { - throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.NoShutdown", hostname, "" - + port ) ); - } - } catch ( Exception e ) { - throw new Carte.CarteCommandException( BaseMessages.getString( PKG, "Carte.Error.NoServerFound", hostname, "" - + port ), e ); + + /** + * @param config the slave server (Carte) configuration + */ + public void setConfig(SlaveServerConfig config) { + this.config = config; } - } - /** - * Exception generated when command line fails - */ - public static class CarteCommandException extends Exception { - private static final long serialVersionUID = 1L; + private static void displayHelpAndAbort() { + HelpFormatter formatter = new HelpFormatter(); + String optionsHelp = getOptionsHelpForUsage(); + String header = + BaseMessages.getString(PKG, "Carte.Usage.Text") + optionsHelp + "\nor\n" + BaseMessages.getString(PKG, + "Carte.Usage.Text2") + "\n\n" + BaseMessages.getString(PKG, "Carte.MainDescription"); + + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + formatter.printHelp(printWriter, 80, "CarteDummy", header, options, 5, 5, "", false); + System.err.println(stripOff(stringWriter.toString(), "usage: CarteDummy")); + + System.err.println(BaseMessages.getString(PKG, "Carte.Usage.Example") + ": Carte 127.0.0.1 8080"); + System.err.println(BaseMessages.getString(PKG, "Carte.Usage.Example") + ": Carte 192.168.1.221 8081"); + System.err.println(); + System.err.println(BaseMessages.getString(PKG, "Carte.Usage.Example") + ": Carte /foo/bar/carte-config.xml"); + System.err.println(BaseMessages.getString(PKG, "Carte.Usage.Example") + + ": Carte http://www.example.com/carte-config.xml"); + System.err.println(BaseMessages.getString(PKG, "Carte.Usage.Example") + + ": Carte 127.0.0.1 8080 -s -u cluster -p cluster"); + + System.exit(1); + } + + private static String getOptionsHelpForUsage() { + HelpFormatter formatter = new HelpFormatter(); + StringWriter stringWriter = new StringWriter(); + PrintWriter printWriter = new PrintWriter(stringWriter); + formatter.printUsage(printWriter, 999, "", options); + return stripOff(stringWriter.toString(), "usage: "); // Strip off the "usage:" so it can be localized + } - public CarteCommandException() { + private static String stripOff(String target, String strip) { + return target.substring(target.indexOf(strip) + strip.length()); } - public CarteCommandException( final String message ) { - super( message ); + private static void shutdown(String hostname, String port, String username, String password) { + try { + callStopCarteRestService(hostname, port, username, password); + } catch (Exception e) { + e.printStackTrace(); + } } - public CarteCommandException( final String message, final Throwable cause ) { - super( message, cause ); + /** + * Checks that Carte is running and if so, shuts down the 
Carte server + * + * @param hostname + * @param port + * @param username + * @param password + * @throws ParseException + * @throws CarteCommandException + */ + private static void callStopCarteRestService(String hostname, String port, String username, String password) + throws ParseException, CarteCommandException { + // get information about the remote connection + try { + ClientConfig clientConfig = new DefaultClientConfig(); + clientConfig.getFeatures().put(JSONConfiguration.FEATURE_POJO_MAPPING, Boolean.TRUE); + Client client = Client.create(clientConfig); + client.addFilter(new HTTPBasicAuthFilter(username, password)); + + // check if the user can access the carte server. Don't really need this call but may want to check it's output at + // some point + String contextURL = "http://" + hostname + ":" + port + "/kettle"; + WebResource resource = client.resource(contextURL + "/status/?xml=Y"); + String response = resource.get(String.class); + if (response == null || !response.contains("")) { + throw new Carte.CarteCommandException(BaseMessages.getString(PKG, "Carte.Error.NoServerFound", hostname, "" + + port)); + } + + // This is the call that matters + resource = client.resource(contextURL + "/stopCarte"); + response = resource.get(String.class); + if (response == null || !response.contains("Shutting Down")) { + throw new Carte.CarteCommandException(BaseMessages.getString(PKG, "Carte.Error.NoShutdown", hostname, "" + + port)); + } + } catch (Exception e) { + throw new Carte.CarteCommandException(BaseMessages.getString(PKG, "Carte.Error.NoServerFound", hostname, "" + + port), e); + } } - public CarteCommandException( final Throwable cause ) { - super( cause ); + /** + * Exception generated when command line fails + */ + public static class CarteCommandException extends Exception { + private static final long serialVersionUID = 1L; + + public CarteCommandException() { + } + + public CarteCommandException(final String message) { + super(message); + } + + public CarteCommandException(final String message, final Throwable cause) { + super(message, cause); + } + + public CarteCommandException(final Throwable cause) { + super(cause); + } } - } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java index c932be2..e839cfa 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java @@ -22,276 +22,264 @@ package org.pentaho.di.www; -import java.util.ArrayList; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - import org.pentaho.di.cluster.SlaveServer; import org.pentaho.di.core.Const; import org.pentaho.di.core.KettleEnvironment; import org.pentaho.di.core.exception.KettleException; -import org.pentaho.di.core.logging.KettleLogStore; -import org.pentaho.di.core.logging.LogChannel; -import org.pentaho.di.core.logging.LogChannelInterface; -import org.pentaho.di.core.logging.LogLevel; -import org.pentaho.di.core.logging.LoggingObjectType; -import org.pentaho.di.core.logging.LoggingRegistry; -import org.pentaho.di.core.logging.SimpleLoggingObject; +import org.pentaho.di.core.logging.*; import org.pentaho.di.core.util.EnvUtil; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.job.Job; import org.pentaho.di.trans.Trans; -import org.pentaho.di.trans.TransConfiguration; -import 
org.pentaho.di.trans.TransExecutionConfiguration; -public class CarteSingleton { +import java.util.*; +import java.util.concurrent.atomic.AtomicBoolean; - private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! +public class CarteSingleton { - private static SlaveServerConfig slaveServerConfig; - private static CarteSingleton carteSingleton; - private static Carte carte; + private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! + + private static SlaveServerConfig slaveServerConfig; + private static CarteSingleton carteSingleton; + private static Carte carte; + + private LogChannelInterface log; + + private TransformationMap transformationMap; + private JobMap jobMap; + private List detections; + private SocketRepository socketRepository; + + private CarteSingleton(SlaveServerConfig config) throws KettleException { + KettleEnvironment.init(); + KettleLogStore.init(config.getMaxLogLines(), config.getMaxLogTimeoutMinutes()); + + this.log = new LogChannel("Carte"); + transformationMap = new TransformationMap(); + transformationMap.setSlaveServerConfig(config); + jobMap = new JobMap(); + jobMap.setSlaveServerConfig(config); + detections = new ArrayList(); + socketRepository = new SocketRepository(log); + + installPurgeTimer(config, log, transformationMap, jobMap); + + SlaveServer slaveServer = config.getSlaveServer(); + if (slaveServer != null) { + int port = WebServer.PORT; + if (!Const.isEmpty(slaveServer.getPort())) { + try { + port = Integer.parseInt(slaveServer.getPort()); + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" + + port), e); + } + } - private LogChannelInterface log; + // TODO: see if we need to keep doing this on a periodic basis. + // The master might be dead or not alive yet at the time we send this + // message. + // Repeating the registration over and over every few minutes might + // harden this sort of problems. + // + if (config.isReportingToMasters()) { + String hostname = slaveServer.getHostname(); + final SlaveServer client = + new SlaveServer("Dynamic slave [" + hostname + ":" + port + "]", hostname, "" + port, slaveServer + .getUsername(), slaveServer.getPassword()); + for (final SlaveServer master : config.getMasters()) { + // Here we use the username/password specified in the slave + // server section of the configuration. + // This doesn't have to be the same pair as the one used on the + // master! 
+ // + try { + SlaveServerDetection slaveServerDetection = new SlaveServerDetection(client); + master.sendXML(slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/"); + log.logBasic("Registered this slave server to master slave server [" + + master.toString() + "] on address [" + master.getServerAndPort() + "]"); + } catch (Exception e) { + log.logError("Unable to register to master slave server [" + + master.toString() + "] on address [" + master.getServerAndPort() + "]"); + } + } + } + } + } - private TransformationMap transformationMap; - private JobMap jobMap; - private List detections; - private SocketRepository socketRepository; + public static void installPurgeTimer(final SlaveServerConfig config, final LogChannelInterface log, + final TransformationMap transformationMap, final JobMap jobMap) { - private CarteSingleton( SlaveServerConfig config ) throws KettleException { - KettleEnvironment.init(); - KettleLogStore.init( config.getMaxLogLines(), config.getMaxLogTimeoutMinutes() ); + final int objectTimeout; + String systemTimeout = EnvUtil.getSystemProperty(Const.KETTLE_CARTE_OBJECT_TIMEOUT_MINUTES, null); - this.log = new LogChannel( "Carte" ); - transformationMap = new TransformationMap(); - transformationMap.setSlaveServerConfig( config ); - jobMap = new JobMap(); - jobMap.setSlaveServerConfig( config ); - detections = new ArrayList(); - socketRepository = new SocketRepository( log ); + // The value specified in XML takes precedence over the environment variable! + // + if (config.getObjectTimeoutMinutes() > 0) { + objectTimeout = config.getObjectTimeoutMinutes(); + } else if (!Const.isEmpty(systemTimeout)) { + objectTimeout = Const.toInt(systemTimeout, 1440); + } else { + objectTimeout = 24 * 60; // 1440 : default is a one day time-out + } - installPurgeTimer( config, log, transformationMap, jobMap ); + // If we need to time out finished or idle objects, we should create a timer + // in the background to clean + // + if (objectTimeout > 0) { + + log.logBasic("Installing timer to purge stale objects after " + objectTimeout + " minutes."); + + Timer timer = new Timer(true); + + final AtomicBoolean busy = new AtomicBoolean(false); + TimerTask timerTask = new TimerTask() { + public void run() { + if (!busy.get()) { + busy.set(true); + + try { + // Check all transformations... + // + for (CarteObjectEntry entry : transformationMap.getTransformationObjects()) { + Trans trans = transformationMap.getTransformation(entry); + + // See if the transformation is finished or stopped. + // + if (trans != null && (trans.isFinished() || trans.isStopped()) && trans.getLogDate() != null) { + // check the last log time + // + int diffInMinutes = + (int) Math.floor((System.currentTimeMillis() - trans.getLogDate().getTime()) / 60000); + if (diffInMinutes >= objectTimeout) { + // Let's remove this from the transformation map... + // + transformationMap.removeTransformation(entry); + + // Remove the logging information from the log registry & central log store + // + LoggingRegistry.getInstance().removeIncludingChildren(trans.getLogChannelId()); + KettleLogStore.discardLines(trans.getLogChannelId(), false); + + // transformationMap.deallocateServerSocketPorts(entry); + + log.logMinimal("Cleaned up transformation " + + entry.getName() + " with id " + entry.getId() + " from " + trans.getLogDate() + + ", diff=" + diffInMinutes); + } + } + } + + // And the jobs... 
+ // + for (CarteObjectEntry entry : jobMap.getJobObjects()) { + Job job = jobMap.getJob(entry); + + // See if the job is finished or stopped. + // + if (job != null && (job.isFinished() || job.isStopped()) && job.getLogDate() != null) { + // check the last log time + // + int diffInMinutes = + (int) Math.floor((System.currentTimeMillis() - job.getLogDate().getTime()) / 60000); + if (diffInMinutes >= objectTimeout) { + // Let's remove this from the job map... + // + jobMap.removeJob(entry); + log.logMinimal("Cleaned up job " + + entry.getName() + " with id " + entry.getId() + " from " + job.getLogDate()); + } + } + } + + } finally { + busy.set(false); + } + } + } + }; - SlaveServer slaveServer = config.getSlaveServer(); - if ( slaveServer != null ) { - int port = WebServer.PORT; - if ( !Const.isEmpty( slaveServer.getPort() ) ) { - try { - port = Integer.parseInt( slaveServer.getPort() ); - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" - + port ), e ); - } - } - - // TODO: see if we need to keep doing this on a periodic basis. - // The master might be dead or not alive yet at the time we send this - // message. - // Repeating the registration over and over every few minutes might - // harden this sort of problems. - // - if ( config.isReportingToMasters() ) { - String hostname = slaveServer.getHostname(); - final SlaveServer client = - new SlaveServer( "Dynamic slave [" + hostname + ":" + port + "]", hostname, "" + port, slaveServer - .getUsername(), slaveServer.getPassword() ); - for ( final SlaveServer master : config.getMasters() ) { - // Here we use the username/password specified in the slave - // server section of the configuration. - // This doesn't have to be the same pair as the one used on the - // master! - // - try { - SlaveServerDetection slaveServerDetection = new SlaveServerDetection( client ); - master.sendXML( slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/" ); - log.logBasic( "Registered this slave server to master slave server [" - + master.toString() + "] on address [" + master.getServerAndPort() + "]" ); - } catch ( Exception e ) { - log.logError( "Unable to register to master slave server [" - + master.toString() + "] on address [" + master.getServerAndPort() + "]" ); - } + // Search for stale objects every 20 seconds: + // + timer.schedule(timerTask, 20000, 20000); } - } } - } - - public static void installPurgeTimer( final SlaveServerConfig config, final LogChannelInterface log, - final TransformationMap transformationMap, final JobMap jobMap ) { - - final int objectTimeout; - String systemTimeout = EnvUtil.getSystemProperty( Const.KETTLE_CARTE_OBJECT_TIMEOUT_MINUTES, null ); - - // The value specified in XML takes precedence over the environment variable! - // - if ( config.getObjectTimeoutMinutes() > 0 ) { - objectTimeout = config.getObjectTimeoutMinutes(); - } else if ( !Const.isEmpty( systemTimeout ) ) { - objectTimeout = Const.toInt( systemTimeout, 1440 ); - } else { - objectTimeout = 24 * 60; // 1440 : default is a one day time-out - } - - // If we need to time out finished or idle objects, we should create a timer - // in the background to clean - // - if ( objectTimeout > 0 ) { - - log.logBasic( "Installing timer to purge stale objects after " + objectTimeout + " minutes." 
); - - Timer timer = new Timer( true ); - - final AtomicBoolean busy = new AtomicBoolean( false ); - TimerTask timerTask = new TimerTask() { - public void run() { - if ( !busy.get() ) { - busy.set( true ); - - try { - // Check all transformations... - // - for ( CarteObjectEntry entry : transformationMap.getTransformationObjects() ) { - Trans trans = transformationMap.getTransformation( entry ); - - // See if the transformation is finished or stopped. - // - if ( trans != null && ( trans.isFinished() || trans.isStopped() ) && trans.getLogDate() != null ) { - // check the last log time - // - int diffInMinutes = - (int) Math.floor( ( System.currentTimeMillis() - trans.getLogDate().getTime() ) / 60000 ); - if ( diffInMinutes >= objectTimeout ) { - // Let's remove this from the transformation map... - // - transformationMap.removeTransformation( entry ); - // Remove the logging information from the log registry & central log store - // - LoggingRegistry.getInstance().removeIncludingChildren( trans.getLogChannelId() ); - KettleLogStore.discardLines( trans.getLogChannelId(), false ); + public static CarteSingleton getInstance() { + try { + if (carteSingleton == null) { + if (slaveServerConfig == null) { + slaveServerConfig = new SlaveServerConfig(); + SlaveServer slaveServer = new SlaveServer(); + slaveServerConfig.setSlaveServer(slaveServer); + } - // transformationMap.deallocateServerSocketPorts(entry); + carteSingleton = new CarteSingleton(slaveServerConfig); - log.logMinimal( "Cleaned up transformation " - + entry.getName() + " with id " + entry.getId() + " from " + trans.getLogDate() - + ", diff=" + diffInMinutes ); - } - } - } - - // And the jobs... - // - for ( CarteObjectEntry entry : jobMap.getJobObjects() ) { - Job job = jobMap.getJob( entry ); - - // See if the job is finished or stopped. - // - if ( job != null && ( job.isFinished() || job.isStopped() ) && job.getLogDate() != null ) { - // check the last log time - // - int diffInMinutes = - (int) Math.floor( ( System.currentTimeMillis() - job.getLogDate().getTime() ) / 60000 ); - if ( diffInMinutes >= objectTimeout ) { - // Let's remove this from the job map... 
- // - jobMap.removeJob( entry ); - log.logMinimal( "Cleaned up job " - + entry.getName() + " with id " + entry.getId() + " from " + job.getLogDate() ); - } - } - } + String carteObjectId = UUID.randomUUID().toString(); + SimpleLoggingObject servletLoggingObject = + new SimpleLoggingObject("CarteSingleton", LoggingObjectType.CARTE, null); + servletLoggingObject.setContainerObjectId(carteObjectId); + servletLoggingObject.setLogLevel(LogLevel.BASIC); - } finally { - busy.set( false ); + return carteSingleton; + } else { + return carteSingleton; } - } + } catch (KettleException ke) { + throw new RuntimeException(ke); } - }; - - // Search for stale objects every 20 seconds: - // - timer.schedule( timerTask, 20000, 20000 ); } - } - - public static CarteSingleton getInstance() { - try { - if ( carteSingleton == null ) { - if ( slaveServerConfig == null ) { - slaveServerConfig = new SlaveServerConfig(); - SlaveServer slaveServer = new SlaveServer(); - slaveServerConfig.setSlaveServer( slaveServer ); - } - carteSingleton = new CarteSingleton( slaveServerConfig ); - - String carteObjectId = UUID.randomUUID().toString(); - SimpleLoggingObject servletLoggingObject = - new SimpleLoggingObject( "CarteSingleton", LoggingObjectType.CARTE, null ); - servletLoggingObject.setContainerObjectId( carteObjectId ); - servletLoggingObject.setLogLevel( LogLevel.BASIC ); - - return carteSingleton; - } else { - return carteSingleton; - } - } catch ( KettleException ke ) { - throw new RuntimeException( ke ); + public TransformationMap getTransformationMap() { + return transformationMap; } - } - - public TransformationMap getTransformationMap() { - return transformationMap; - } - public void setTransformationMap( TransformationMap transformationMap ) { - this.transformationMap = transformationMap; - } + public void setTransformationMap(TransformationMap transformationMap) { + this.transformationMap = transformationMap; + } - public JobMap getJobMap() { - return jobMap; - } + public JobMap getJobMap() { + return jobMap; + } - public void setJobMap( JobMap jobMap ) { - this.jobMap = jobMap; - } + public void setJobMap(JobMap jobMap) { + this.jobMap = jobMap; + } - public List getDetections() { - return detections; - } + public List getDetections() { + return detections; + } - public void setDetections( List detections ) { - this.detections = detections; - } + public void setDetections(List detections) { + this.detections = detections; + } - public SocketRepository getSocketRepository() { - return socketRepository; - } + public SocketRepository getSocketRepository() { + return socketRepository; + } - public void setSocketRepository( SocketRepository socketRepository ) { - this.socketRepository = socketRepository; - } + public void setSocketRepository(SocketRepository socketRepository) { + this.socketRepository = socketRepository; + } - public static SlaveServerConfig getSlaveServerConfig() { - return slaveServerConfig; - } + public static SlaveServerConfig getSlaveServerConfig() { + return slaveServerConfig; + } - public static void setSlaveServerConfig( SlaveServerConfig slaveServerConfig ) { - CarteSingleton.slaveServerConfig = slaveServerConfig; - } + public static void setSlaveServerConfig(SlaveServerConfig slaveServerConfig) { + CarteSingleton.slaveServerConfig = slaveServerConfig; + } - public static void setCarte( Carte carte ) { - CarteSingleton.carte = carte; - } + public static void setCarte(Carte carte) { + CarteSingleton.carte = carte; + } - public static Carte getCarte() { - return CarteSingleton.carte; - } 
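The purge timer installed above resolves its timeout with a fixed precedence: the value from the carte-config XML wins, then the KETTLE_CARTE_OBJECT_TIMEOUT_MINUTES variable, then a one-day default; a finished transformation or job is dropped once its last log date is older than that. A dependency-free sketch of both decisions (names are illustrative, not Kettle API):

public class PurgeTimeoutSketch {

    // XML config value > KETTLE_CARTE_OBJECT_TIMEOUT_MINUTES > 1440 minutes (one day).
    static int resolveObjectTimeoutMinutes(int configuredMinutes, String systemTimeout) {
        if (configuredMinutes > 0) {
            return configuredMinutes;
        }
        if (systemTimeout != null && !systemTimeout.trim().isEmpty()) {
            try {
                return Integer.parseInt(systemTimeout.trim());
            } catch (NumberFormatException ignored) {
                // fall through to the default, as Const.toInt(systemTimeout, 1440) does
            }
        }
        return 24 * 60;
    }

    // Mirrors the diffInMinutes computation used to decide whether a finished
    // transformation or job should be removed from the object maps.
    static boolean isStale(long lastLogTimeMillis, int objectTimeoutMinutes) {
        int diffInMinutes = (int) Math.floor((System.currentTimeMillis() - lastLogTimeMillis) / 60000);
        return diffInMinutes >= objectTimeoutMinutes;
    }

    public static void main(String[] args) {
        System.out.println(resolveObjectTimeoutMinutes(0, null));   // 1440
        System.out.println(resolveObjectTimeoutMinutes(0, "60"));   // 60
        System.out.println(resolveObjectTimeoutMinutes(15, "60"));  // 15
        long twoHoursAgo = System.currentTimeMillis() - 2L * 60 * 60 * 1000;
        System.out.println(isStale(twoHoursAgo, 60));               // true
    }
}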
+ public static Carte getCarte() { + return CarteSingleton.carte; + } - public LogChannelInterface getLog() { - return log; - } + public LogChannelInterface getLog() { + return log; + } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java index 857de35..1de6de0 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/WebServer.java @@ -55,467 +55,463 @@ import javax.servlet.Servlet; import java.io.File; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Timer; -import java.util.TimerTask; +import java.util.*; public class WebServer { - private static Class PKG = WebServer.class; // for i18n purposes, needed by Translator2!! - - private LogChannelInterface log; - - public static final int PORT = 80; - - private Server server; - - private TransformationMap transformationMap; - private JobMap jobMap; - private List detections; - private SocketRepository socketRepository; - - private String hostname; - private int port; - - private Timer slaveMonitoringTimer; - - private String passwordFile; - private WebServerShutdownHook webServerShutdownHook; - private IWebServerShutdownHandler webServerShutdownHandler = new DefaultWebServerShutdownHandler(); - - private SslConfiguration sslConfig; - - public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, - SocketRepository socketRepository, List detections, String hostname, int port, boolean join, - String passwordFile ) throws Exception { - this( log, transformationMap, jobMap, socketRepository, detections, hostname, port, join, passwordFile, null ); - } - - public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, - SocketRepository socketRepository, List detections, String hostname, int port, boolean join, - String passwordFile, SslConfiguration sslConfig ) throws Exception { - this.log = log; - this.transformationMap = transformationMap; - this.jobMap = jobMap; - this.socketRepository = socketRepository; - this.detections = detections; - this.hostname = hostname; - this.port = port; - this.passwordFile = passwordFile; - this.sslConfig = sslConfig; - - startServer(); - - // Start the monitoring of the registered slave servers... 
- // - startSlaveMonitoring(); - - webServerShutdownHook = new WebServerShutdownHook( this ); - Runtime.getRuntime().addShutdownHook( webServerShutdownHook ); - - try { - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.CarteStartup.id, this ); - } catch ( KettleException e ) { - // Log error but continue regular operations to make sure Carte continues to run properly - // - log.logError( "Error calling extension point CarteStartup", e ); - } - - if ( join ) { - server.join(); - } - } - - public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, - SocketRepository socketRepository, List slaveServers, String hostname, int port ) - throws Exception { - this( log, transformationMap, jobMap, socketRepository, slaveServers, hostname, port, true ); - } - - public WebServer( LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, - SocketRepository socketRepository, List detections, String hostname, int port, - boolean join ) throws Exception { - this( log, transformationMap, jobMap, socketRepository, detections, hostname, port, join, null, null ); - } - - public Server getServer() { - return server; - } - - public void startServer() throws Exception { - server = new Server(); - - List roles = new ArrayList(); - roles.add( Constraint.ANY_ROLE ); - - // Set up the security handler, optionally with JAAS - // - ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler(); - - if ( System.getProperty( "loginmodulename" ) != null - && System.getProperty( "java.security.auth.login.config" ) != null ) { - JAASLoginService jaasLoginService = new JAASLoginService( "Kettle" ); - jaasLoginService.setLoginModuleName( System.getProperty( "loginmodulename" ) ); - securityHandler.setLoginService( jaasLoginService ); - } else { - roles.add( "default" ); - HashLoginService hashLoginService; - SlaveServer slaveServer = transformationMap.getSlaveServerConfig().getSlaveServer(); - if ( !Const.isEmpty( slaveServer.getPassword() ) ) { - hashLoginService = new HashLoginService( "Kettle" ); - hashLoginService.putUser( slaveServer.getUsername(), new Password( slaveServer.getPassword() ), - new String[] { "default" } ); - } else { - // See if there is a kettle.pwd file in the KETTLE_HOME directory: - if ( Const.isEmpty( passwordFile ) ) { - File homePwdFile = new File( Const.getKettleCartePasswordFile() ); - if ( homePwdFile.exists() ) { - passwordFile = Const.getKettleCartePasswordFile(); - } else { - passwordFile = Const.getKettleLocalCartePasswordFile(); - } + private static Class PKG = WebServer.class; // for i18n purposes, needed by Translator2!! 
+ + private LogChannelInterface log; + + public static final int PORT = 80; + + private Server server; + + private TransformationMap transformationMap; + private JobMap jobMap; + private List detections; + private SocketRepository socketRepository; + + private String hostname; + private int port; + + private Timer slaveMonitoringTimer; + + private String passwordFile; + private WebServerShutdownHook webServerShutdownHook; + private IWebServerShutdownHandler webServerShutdownHandler = new DefaultWebServerShutdownHandler(); + + private SslConfiguration sslConfig; + + public WebServer(LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List detections, String hostname, int port, boolean join, + String passwordFile) throws Exception { + this(log, transformationMap, jobMap, socketRepository, detections, hostname, port, join, passwordFile, null); + } + + public WebServer(LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List detections, String hostname, int port, boolean join, + String passwordFile, SslConfiguration sslConfig) throws Exception { + this.log = log; + this.transformationMap = transformationMap; + this.jobMap = jobMap; + this.socketRepository = socketRepository; + this.detections = detections; + this.hostname = hostname; + this.port = port; + this.passwordFile = passwordFile; + this.sslConfig = sslConfig; + + startServer(); + + // Start the monitoring of the registered slave servers... + // + startSlaveMonitoring(); + + webServerShutdownHook = new WebServerShutdownHook(this); + Runtime.getRuntime().addShutdownHook(webServerShutdownHook); + + try { + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.CarteStartup.id, this); + } catch (KettleException e) { + // Log error but continue regular operations to make sure Carte continues to run properly + // + log.logError("Error calling extension point CarteStartup", e); + } + + if (join) { + server.join(); } - hashLoginService = new HashLoginService( "Kettle", passwordFile ) { - @Override public synchronized UserIdentity putUser( String userName, Credential credential, String[] roles ) { - List newRoles = new ArrayList(); - newRoles.add( "default" ); - Collections.addAll( newRoles, roles ); - return super.putUser( userName, credential, newRoles.toArray( new String[newRoles.size()] ) ); - } - }; - } - securityHandler.setLoginService( hashLoginService ); } - Constraint constraint = new Constraint(); - constraint.setName( Constraint.__BASIC_AUTH ); - constraint.setRoles( roles.toArray( new String[roles.size()] ) ); - constraint.setAuthenticate( true ); + public WebServer(LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List slaveServers, String hostname, int port) + throws Exception { + this(log, transformationMap, jobMap, socketRepository, slaveServers, hostname, port, true); + } - ConstraintMapping constraintMapping = new ConstraintMapping(); - constraintMapping.setConstraint( constraint ); - constraintMapping.setPathSpec( "/*" ); + public WebServer(LogChannelInterface log, TransformationMap transformationMap, JobMap jobMap, + SocketRepository socketRepository, List detections, String hostname, int port, + boolean join) throws Exception { + this(log, transformationMap, jobMap, socketRepository, detections, hostname, port, join, null, null); + } - securityHandler.setConstraintMappings( new ConstraintMapping[] { constraintMapping } ); + 
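The constructor above starts the embedded server, begins slave monitoring, registers a JVM shutdown hook, fires the CarteStartup extension point (logging failures without aborting startup), and finally blocks on server.join() when the join flag is set. The following JDK-only sketch shows that start / hook / optional-join flow; TinyServer is a hypothetical placeholder for the embedded Jetty Server, not part of the patch.

import java.util.concurrent.CountDownLatch;

// Hypothetical server; stands in for the embedded Jetty Server used above.
final class TinyServer {
    private final CountDownLatch stopped = new CountDownLatch(1);
    private Thread worker;

    void start() {
        worker = new Thread(() -> {
            try {
                stopped.await();                  // "serve" until stop() is called
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }, "tiny-server");
        worker.start();
    }

    void stop() { stopped.countDown(); }

    void join() throws InterruptedException { worker.join(); } // blocks the caller, like Server.join()
}

public final class TinyServerBootstrap {
    public static void main(String[] args) throws Exception {
        TinyServer server = new TinyServer();
        server.start();

        // Register a shutdown hook so SIGTERM / Ctrl-C stops the server cleanly,
        // mirroring the WebServerShutdownHook registered in the constructor above.
        Runtime.getRuntime().addShutdownHook(new Thread(server::stop, "tiny-server-shutdown"));

        boolean join = true;                      // the 'join' flag decides whether the caller blocks
        if (join) {
            server.join();
        }
    }
}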
public Server getServer() { + return server; + } - // Add all the servlets defined in kettle-servlets.xml ... - // - ContextHandlerCollection contexts = new ContextHandlerCollection(); + public void startServer() throws Exception { + server = new Server(); - // Root - // - ServletContextHandler - root = - new ServletContextHandler( contexts, GetRootServlet.CONTEXT_PATH, ServletContextHandler.SESSIONS ); - GetRootServlet rootServlet = new GetRootServlet(); - rootServlet.setJettyMode( true ); - root.addServlet( new ServletHolder( rootServlet ), "/*" ); + List roles = new ArrayList(); + roles.add(Constraint.ANY_ROLE); - PluginRegistry pluginRegistry = PluginRegistry.getInstance(); - List plugins = pluginRegistry.getPlugins( CartePluginType.class ); - for ( PluginInterface plugin : plugins ) { + // Set up the security handler, optionally with JAAS + // + ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler(); + + if (System.getProperty("loginmodulename") != null + && System.getProperty("java.security.auth.login.config") != null) { + JAASLoginService jaasLoginService = new JAASLoginService("Kettle"); + jaasLoginService.setLoginModuleName(System.getProperty("loginmodulename")); + securityHandler.setLoginService(jaasLoginService); + } else { + roles.add("default"); + HashLoginService hashLoginService; + SlaveServer slaveServer = transformationMap.getSlaveServerConfig().getSlaveServer(); + if (!Const.isEmpty(slaveServer.getPassword())) { + hashLoginService = new HashLoginService("Kettle"); + hashLoginService.putUser(slaveServer.getUsername(), new Password(slaveServer.getPassword()), + new String[]{"default"}); + } else { + // See if there is a kettle.pwd file in the KETTLE_HOME directory: + if (Const.isEmpty(passwordFile)) { + File homePwdFile = new File(Const.getKettleCartePasswordFile()); + if (homePwdFile.exists()) { + passwordFile = Const.getKettleCartePasswordFile(); + } else { + passwordFile = Const.getKettleLocalCartePasswordFile(); + } + } + hashLoginService = new HashLoginService("Kettle", passwordFile) { + @Override + public synchronized UserIdentity putUser(String userName, Credential credential, String[] roles) { + List newRoles = new ArrayList(); + newRoles.add("default"); + Collections.addAll(newRoles, roles); + return super.putUser(userName, credential, newRoles.toArray(new String[newRoles.size()])); + } + }; + } + securityHandler.setLoginService(hashLoginService); + } - CartePluginInterface servlet = pluginRegistry.loadClass( plugin, CartePluginInterface.class ); - servlet.setup( transformationMap, jobMap, socketRepository, detections ); - servlet.setJettyMode( true ); + Constraint constraint = new Constraint(); + constraint.setName(Constraint.__BASIC_AUTH); + constraint.setRoles(roles.toArray(new String[roles.size()])); + constraint.setAuthenticate(true); - ServletContextHandler servletContext = - new ServletContextHandler( contexts, getContextPath( servlet ), ServletContextHandler.SESSIONS ); - ServletHolder servletHolder = new ServletHolder( (Servlet) servlet ); - servletContext.addServlet( servletHolder, "/*" ); - } + ConstraintMapping constraintMapping = new ConstraintMapping(); + constraintMapping.setConstraint(constraint); + constraintMapping.setPathSpec("/*"); - // setup jersey (REST) - ServletHolder jerseyServletHolder = new ServletHolder( ServletContainer.class ); - jerseyServletHolder.setInitParameter( "com.sun.jersey.config.property.resourceConfigClass", - "com.sun.jersey.api.core.PackagesResourceConfig" ); - jerseyServletHolder.setInitParameter( 
"com.sun.jersey.config.property.packages", "org.pentaho.di.www.jaxrs" ); - root.addServlet( jerseyServletHolder, "/api/*" ); + securityHandler.setConstraintMappings(new ConstraintMapping[]{constraintMapping}); - // setup static resource serving - // ResourceHandler mobileResourceHandler = new ResourceHandler(); - // mobileResourceHandler.setWelcomeFiles(new String[]{"index.html"}); - // mobileResourceHandler.setResourceBase(getClass().getClassLoader(). - // getResource("org/pentaho/di/www/mobile").toExternalForm()); - // Context mobileContext = new Context(contexts, "/mobile", Context.SESSIONS); - // mobileContext.setHandler(mobileResourceHandler); + // Add all the servlets defined in kettle-servlets.xml ... + // + ContextHandlerCollection contexts = new ContextHandlerCollection(); + + // Root + // + ServletContextHandler + root = + new ServletContextHandler(contexts, GetRootServlet.CONTEXT_PATH, ServletContextHandler.SESSIONS); + GetRootServlet rootServlet = new GetRootServlet(); + rootServlet.setJettyMode(true); + root.addServlet(new ServletHolder(rootServlet), "/*"); + + PluginRegistry pluginRegistry = PluginRegistry.getInstance(); + List plugins = pluginRegistry.getPlugins(CartePluginType.class); + for (PluginInterface plugin : plugins) { + + CartePluginInterface servlet = pluginRegistry.loadClass(plugin, CartePluginInterface.class); + servlet.setup(transformationMap, jobMap, socketRepository, detections); + servlet.setJettyMode(true); + + ServletContextHandler servletContext = + new ServletContextHandler(contexts, getContextPath(servlet), ServletContextHandler.SESSIONS); + ServletHolder servletHolder = new ServletHolder((Servlet) servlet); + servletContext.addServlet(servletHolder, "/*"); + } + + // setup jersey (REST) + ServletHolder jerseyServletHolder = new ServletHolder(ServletContainer.class); + jerseyServletHolder.setInitParameter("com.sun.jersey.config.property.resourceConfigClass", + "com.sun.jersey.api.core.PackagesResourceConfig"); + jerseyServletHolder.setInitParameter("com.sun.jersey.config.property.packages", "org.pentaho.di.www.jaxrs"); + root.addServlet(jerseyServletHolder, "/api/*"); + + // setup static resource serving + // ResourceHandler mobileResourceHandler = new ResourceHandler(); + // mobileResourceHandler.setWelcomeFiles(new String[]{"index.html"}); + // mobileResourceHandler.setResourceBase(getClass().getClassLoader(). + // getResource("org/pentaho/di/www/mobile").toExternalForm()); + // Context mobileContext = new Context(contexts, "/mobile", Context.SESSIONS); + // mobileContext.setHandler(mobileResourceHandler); + + // Allow png files to be shown for transformations and jobs... + // + ResourceHandler resourceHandler = new ResourceHandler(); + resourceHandler.setResourceBase("temp"); + // add all handlers/contexts to server - // Allow png files to be shown for transformations and jobs... 
- // - ResourceHandler resourceHandler = new ResourceHandler(); - resourceHandler.setResourceBase( "temp" ); - // add all handlers/contexts to server + HandlerList handlers = new HandlerList(); + handlers.setHandlers(new Handler[]{contexts, resourceHandler}); + securityHandler.setHandler(handlers); - HandlerList handlers = new HandlerList(); - handlers.setHandlers( new Handler[] { contexts, resourceHandler } ); - securityHandler.setHandler( handlers ); + server.setHandler(securityHandler); - server.setHandler( securityHandler ); + // Start execution + createListeners(); - // Start execution - createListeners(); + server.start(); + } - server.start(); - } + public String getContextPath(CartePluginInterface servlet) { + String contextPath = servlet.getContextPath(); + if (!contextPath.startsWith("/kettle")) { + contextPath = "/kettle" + contextPath; + } + return contextPath; + } - public String getContextPath( CartePluginInterface servlet ) { - String contextPath = servlet.getContextPath(); - if ( !contextPath.startsWith( "/kettle" ) ) { - contextPath = "/kettle" + contextPath; + public void join() throws InterruptedException { + server.join(); } - return contextPath; - } - public void join() throws InterruptedException { - server.join(); - } + public void stopServer() { - public void stopServer() { + webServerShutdownHook.setShuttingDown(true); - webServerShutdownHook.setShuttingDown( true ); + try { + ExtensionPointHandler.callExtensionPoint(log, KettleExtensionPoint.CarteShutdown.id, this); + } catch (KettleException e) { + // Log error but continue regular operations to make sure Carte can be shut down properly. + // + log.logError("Error calling extension point CarteStartup", e); + } - try { - ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.CarteShutdown.id, this ); - } catch ( KettleException e ) { - // Log error but continue regular operations to make sure Carte can be shut down properly. - // - log.logError( "Error calling extension point CarteStartup", e ); + try { + if (server != null) { + + // Stop the monitoring timer + // + if (slaveMonitoringTimer != null) { + slaveMonitoringTimer.cancel(); + slaveMonitoringTimer = null; + } + + // Clean up all the server sockets... + // + socketRepository.closeAll(); + + // Stop the server... 
+ // + server.stop(); + KettleEnvironment.shutdown(); + if (webServerShutdownHandler != null) { + webServerShutdownHandler.shutdownWebServer(); + } + } + } catch (Exception e) { + log.logError(BaseMessages.getString(PKG, "WebServer.Error.FailedToStop.Title"), + BaseMessages.getString(PKG, "WebServer.Error.FailedToStop.Msg", "" + e)); + } } - try { - if ( server != null ) { + private void createListeners() { - // Stop the monitoring timer - // - if ( slaveMonitoringTimer != null ) { - slaveMonitoringTimer.cancel(); - slaveMonitoringTimer = null; + SocketConnector connector = getConnector(); + setupJettyOptions(connector); + connector.setPort(port); + connector.setHost(hostname); + connector.setName(BaseMessages.getString(PKG, "WebServer.Log.KettleHTTPListener", hostname)); + log.logBasic(BaseMessages.getString(PKG, "WebServer.Log.CreateListener", hostname, "" + port)); + + server.setConnectors(new Connector[]{connector}); + } + + private SocketConnector getConnector() { + if (sslConfig != null) { + log.logBasic(BaseMessages.getString(PKG, "WebServer.Log.SslModeUsing")); + SslSocketConnector connector = new SslSocketConnector(); + connector.setKeystore(sslConfig.getKeyStore()); + connector.setPassword(sslConfig.getKeyStorePassword()); + connector.setKeyPassword(sslConfig.getKeyPassword()); + connector.setKeystoreType(sslConfig.getKeyStoreType()); + return connector; + } else { + return new SocketConnector(); } - // Clean up all the server sockets... - // - socketRepository.closeAll(); + } - // Stop the server... - // - server.stop(); - KettleEnvironment.shutdown(); - if ( webServerShutdownHandler != null ) { - webServerShutdownHandler.shutdownWebServer(); + /** + * Set up jetty options to the connector + * + * @param connector + */ + protected void setupJettyOptions(SocketConnector connector) { + if (validProperty(Const.KETTLE_CARTE_JETTY_ACCEPTORS)) { + connector.setAcceptors(Integer.parseInt(System.getProperty(Const.KETTLE_CARTE_JETTY_ACCEPTORS))); + log.logBasic( + BaseMessages.getString(PKG, "WebServer.Log.ConfigOptions", "acceptors", connector.getAcceptors())); } - } - } catch ( Exception e ) { - log.logError( BaseMessages.getString( PKG, "WebServer.Error.FailedToStop.Title" ), - BaseMessages.getString( PKG, "WebServer.Error.FailedToStop.Msg", "" + e ) ); - } - } - - private void createListeners() { - - SocketConnector connector = getConnector(); - setupJettyOptions( connector ); - connector.setPort( port ); - connector.setHost( hostname ); - connector.setName( BaseMessages.getString( PKG, "WebServer.Log.KettleHTTPListener", hostname ) ); - log.logBasic( BaseMessages.getString( PKG, "WebServer.Log.CreateListener", hostname, "" + port ) ); - - server.setConnectors( new Connector[] { connector } ); - } - - private SocketConnector getConnector() { - if ( sslConfig != null ) { - log.logBasic( BaseMessages.getString( PKG, "WebServer.Log.SslModeUsing" ) ); - SslSocketConnector connector = new SslSocketConnector(); - connector.setKeystore( sslConfig.getKeyStore() ); - connector.setPassword( sslConfig.getKeyStorePassword() ); - connector.setKeyPassword( sslConfig.getKeyPassword() ); - connector.setKeystoreType( sslConfig.getKeyStoreType() ); - return connector; - } else { - return new SocketConnector(); - } - - } - - /** - * Set up jetty options to the connector - * - * @param connector - */ - protected void setupJettyOptions( SocketConnector connector ) { - if ( validProperty( Const.KETTLE_CARTE_JETTY_ACCEPTORS ) ) { - connector.setAcceptors( Integer.parseInt( System.getProperty( 
Const.KETTLE_CARTE_JETTY_ACCEPTORS ) ) ); - log.logBasic( - BaseMessages.getString( PKG, "WebServer.Log.ConfigOptions", "acceptors", connector.getAcceptors() ) ); - } - - if ( validProperty( Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE ) ) { - connector - .setAcceptQueueSize( Integer.parseInt( System.getProperty( Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE ) ) ); - log.logBasic( BaseMessages - .getString( PKG, "WebServer.Log.ConfigOptions", "acceptQueueSize", connector.getAcceptQueueSize() ) ); - } - - if ( validProperty( Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME ) ) { - connector.setLowResourceMaxIdleTime( - Integer.parseInt( System.getProperty( Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME ) ) ); - log.logBasic( BaseMessages.getString( PKG, "WebServer.Log.ConfigOptions", "lowResourcesMaxIdleTime", - connector.getLowResourceMaxIdleTime() ) ); - } - - } - - /** - * Checks if the property is not null or not empty String that can be parseable as int and returns true if it is, - * otherwise false - * - * @param property the property to check - * @return true if the property is not null or not empty String that can be parseable as int, false otherwise - */ - private boolean validProperty( String property ) { - boolean isValid = false; - if ( System.getProperty( property ) != null && System.getProperty( property ).length() > 0 ) { - try { - Integer.parseInt( System.getProperty( property ) ); - isValid = true; - } catch ( NumberFormatException nmbfExc ) { - log.logBasic( BaseMessages - .getString( PKG, "WebServer.Log.ConfigOptionsInvalid", property, System.getProperty( property ) ) ); - } - } - return isValid; - } - - /** - * @return the hostname - */ - public String getHostname() { - return hostname; - } - - /** - * @param hostname the hostname to set - */ - public void setHostname( String hostname ) { - this.hostname = hostname; - } - - /** - * @return the slave server detections - */ - public List getDetections() { - return detections; - } - - /** - * This method registers a timer to check up on all the registered slave servers every X seconds.
- */ - private void startSlaveMonitoring() { - slaveMonitoringTimer = new Timer( "WebServer Timer" ); - TimerTask timerTask = new TimerTask() { - - public void run() { - for ( SlaveServerDetection slaveServerDetection : detections ) { - SlaveServer slaveServer = slaveServerDetection.getSlaveServer(); - - // See if we can get a status... - // - try { - // TODO: consider making this lighter or retaining more information... - slaveServer.getStatus(); // throws the exception - slaveServerDetection.setActive( true ); - slaveServerDetection.setLastActiveDate( new Date() ); - } catch ( Exception e ) { - slaveServerDetection.setActive( false ); - slaveServerDetection.setLastInactiveDate( new Date() ); - - // TODO: kick it out after a configurable period of time... - } + + if (validProperty(Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE)) { + connector + .setAcceptQueueSize(Integer.parseInt(System.getProperty(Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE))); + log.logBasic(BaseMessages + .getString(PKG, "WebServer.Log.ConfigOptions", "acceptQueueSize", connector.getAcceptQueueSize())); + } + + if (validProperty(Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME)) { + connector.setLowResourceMaxIdleTime( + Integer.parseInt(System.getProperty(Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME))); + log.logBasic(BaseMessages.getString(PKG, "WebServer.Log.ConfigOptions", "lowResourcesMaxIdleTime", + connector.getLowResourceMaxIdleTime())); + } + + } + + /** + * Checks if the property is not null or not empty String that can be parseable as int and returns true if it is, + * otherwise false + * + * @param property the property to check + * @return true if the property is not null or not empty String that can be parseable as int, false otherwise + */ + private boolean validProperty(String property) { + boolean isValid = false; + if (System.getProperty(property) != null && System.getProperty(property).length() > 0) { + try { + Integer.parseInt(System.getProperty(property)); + isValid = true; + } catch (NumberFormatException nmbfExc) { + log.logBasic(BaseMessages + .getString(PKG, "WebServer.Log.ConfigOptionsInvalid", property, System.getProperty(property))); + } } - } - }; - slaveMonitoringTimer.schedule( timerTask, 20000, 20000 ); - } - - /** - * @return the socketRepository - */ - public SocketRepository getSocketRepository() { - return socketRepository; - } - - /** - * @param socketRepository the socketRepository to set - */ - public void setSocketRepository( SocketRepository socketRepository ) { - this.socketRepository = socketRepository; - } - - public String getPasswordFile() { - return passwordFile; - } - - public void setPasswordFile( String passwordFile ) { - this.passwordFile = passwordFile; - } - - public LogChannelInterface getLog() { - return log; - } - - public void setLog( LogChannelInterface log ) { - this.log = log; - } - - public TransformationMap getTransformationMap() { - return transformationMap; - } - - public void setTransformationMap( TransformationMap transformationMap ) { - this.transformationMap = transformationMap; - } - - public JobMap getJobMap() { - return jobMap; - } - - public void setJobMap( JobMap jobMap ) { - this.jobMap = jobMap; - } - - public int getPort() { - return port; - } - - public void setPort( int port ) { - this.port = port; - } - - public Timer getSlaveMonitoringTimer() { - return slaveMonitoringTimer; - } - - public void setSlaveMonitoringTimer( Timer slaveMonitoringTimer ) { - this.slaveMonitoringTimer = slaveMonitoringTimer; - } - - public void setServer( Server server ) { - 
this.server = server; - } - - public void setDetections( List detections ) { - this.detections = detections; - } - - /** - * Can be used to override the default shutdown behavior of performing a System.exit - * - * @param webServerShutdownHandler - */ - public void setWebServerShutdownHandler( IWebServerShutdownHandler webServerShutdownHandler ) { - this.webServerShutdownHandler = webServerShutdownHandler; - } + return isValid; + } + + /** + * @return the hostname + */ + public String getHostname() { + return hostname; + } + + /** + * @param hostname the hostname to set + */ + public void setHostname(String hostname) { + this.hostname = hostname; + } + + /** + * @return the slave server detections + */ + public List getDetections() { + return detections; + } + + /** + * This method registers a timer to check up on all the registered slave servers every X seconds.
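startSlaveMonitoring(), shown next, schedules a java.util.Timer that polls every registered slave server every 20 seconds and records whether the status call succeeded. A JDK-only sketch of that polling pattern follows; the Probe interface and SlaveMonitor class are hypothetical stand-ins for SlaveServerDetection and SlaveServer.getStatus().

import java.util.Date;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.CopyOnWriteArrayList;

// Hypothetical health probe; stands in for SlaveServer.getStatus().
interface Probe {
    void check() throws Exception;   // throws when the remote server is unreachable
}

final class ProbeState {
    final Probe probe;
    volatile boolean active;
    volatile Date lastChange = new Date();
    ProbeState(Probe probe) { this.probe = probe; }
}

final class SlaveMonitor {
    private final List<ProbeState> probes = new CopyOnWriteArrayList<>();
    // Daemon timer so it cannot block JVM exit; the patch uses a non-daemon
    // Timer named "WebServer Timer" and cancels it explicitly in stopServer().
    private final Timer timer = new Timer("SlaveMonitor", true);

    void register(Probe p) { probes.add(new ProbeState(p)); }

    void start() {
        TimerTask task = new TimerTask() {
            @Override
            public void run() {
                for (ProbeState state : probes) {
                    try {
                        state.probe.check();      // equivalent of slaveServer.getStatus()
                        state.active = true;
                    } catch (Exception e) {
                        state.active = false;     // mark inactive on any failure
                    }
                    state.lastChange = new Date();
                }
            }
        };
        timer.schedule(task, 20000, 20000);       // initial delay and period: 20 seconds
    }

    void stop() { timer.cancel(); }
}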
+ */ + private void startSlaveMonitoring() { + slaveMonitoringTimer = new Timer("WebServer Timer"); + TimerTask timerTask = new TimerTask() { + + public void run() { + for (SlaveServerDetection slaveServerDetection : detections) { + SlaveServer slaveServer = slaveServerDetection.getSlaveServer(); + + // See if we can get a status... + // + try { + // TODO: consider making this lighter or retaining more information... + slaveServer.getStatus(); // throws the exception + slaveServerDetection.setActive(true); + slaveServerDetection.setLastActiveDate(new Date()); + } catch (Exception e) { + slaveServerDetection.setActive(false); + slaveServerDetection.setLastInactiveDate(new Date()); + + // TODO: kick it out after a configurable period of time... + } + } + } + }; + slaveMonitoringTimer.schedule(timerTask, 20000, 20000); + } + + /** + * @return the socketRepository + */ + public SocketRepository getSocketRepository() { + return socketRepository; + } + + /** + * @param socketRepository the socketRepository to set + */ + public void setSocketRepository(SocketRepository socketRepository) { + this.socketRepository = socketRepository; + } + + public String getPasswordFile() { + return passwordFile; + } + + public void setPasswordFile(String passwordFile) { + this.passwordFile = passwordFile; + } + + public LogChannelInterface getLog() { + return log; + } + + public void setLog(LogChannelInterface log) { + this.log = log; + } + + public TransformationMap getTransformationMap() { + return transformationMap; + } + + public void setTransformationMap(TransformationMap transformationMap) { + this.transformationMap = transformationMap; + } + + public JobMap getJobMap() { + return jobMap; + } + + public void setJobMap(JobMap jobMap) { + this.jobMap = jobMap; + } + + public int getPort() { + return port; + } + + public void setPort(int port) { + this.port = port; + } + + public Timer getSlaveMonitoringTimer() { + return slaveMonitoringTimer; + } + + public void setSlaveMonitoringTimer(Timer slaveMonitoringTimer) { + this.slaveMonitoringTimer = slaveMonitoringTimer; + } + + public void setServer(Server server) { + this.server = server; + } + + public void setDetections(List detections) { + this.detections = detections; + } + + /** + * Can be used to override the default shutdown behavior of performing a System.exit + * + * @param webServerShutdownHandler + */ + public void setWebServerShutdownHandler(IWebServerShutdownHandler webServerShutdownHandler) { + this.webServerShutdownHandler = webServerShutdownHandler; + } } diff --git a/pentaho-kettle/src/main/resources/kettle-servlets.xml b/pentaho-kettle/src/main/resources/kettle-servlets.xml index bce1b63..82f3c09 100644 --- a/pentaho-kettle/src/main/resources/kettle-servlets.xml +++ b/pentaho-kettle/src/main/resources/kettle-servlets.xml @@ -12,52 +12,151 @@ - Get the status of the server org.pentaho.di.www.GetStatusServlet - The the status of a transformation org.pentaho.di.www.GetTransStatusServlet - Prepare the execution of a transformation org.pentaho.di.www.PrepareExecutionTransServlet - Start the execution of a transformation org.pentaho.di.www.StartExecutionTransServlet - Prepare and start the execution of a transformation org.pentaho.di.www.StartTransServlet - Pause or continue a transformation org.pentaho.di.www.PauseTransServlet - Stop a transformation org.pentaho.di.www.StopTransServlet - Cleanup a transformation: close remote sockets, ... 
org.pentaho.di.www.CleanupTransServlet - Add a transformation for execution org.pentaho.di.www.AddTransServlet - Remove a transformation org.pentaho.di.www.RemoveTransServlet - Service for the allocation of server sockets org.pentaho.di.www.AllocateServerSocketServlet - Lists server socket allocation information org.pentaho.di.www.ListServerSocketServlet - Sniff test a transformation step org.pentaho.di.www.SniffStepServlet - Execute (prepare and start) a specific transformation and pass output to the servlet org.pentaho.di.www.ExecuteTransServlet - Generate a PNG image of a transformation org.pentaho.di.www.GetTransImageServlet - Run a transformation directly from a repository org.pentaho.di.www.RunTransServlet + + Get the status of the server + org.pentaho.di.www.GetStatusServlet + + + The the status of a transformation + org.pentaho.di.www.GetTransStatusServlet + + + Prepare the execution of a transformation + org.pentaho.di.www.PrepareExecutionTransServlet + + + Start the execution of a transformation + org.pentaho.di.www.StartExecutionTransServlet + + + Prepare and start the execution of a transformation + org.pentaho.di.www.StartTransServlet + + + Pause or continue a transformation + org.pentaho.di.www.PauseTransServlet + + + Stop a transformation + org.pentaho.di.www.StopTransServlet + + + Cleanup a transformation: close remote sockets, ... + org.pentaho.di.www.CleanupTransServlet + + + Add a transformation for execution + org.pentaho.di.www.AddTransServlet + + + Remove a transformation + org.pentaho.di.www.RemoveTransServlet + + + Service for the allocation of server sockets + org.pentaho.di.www.AllocateServerSocketServlet + + + Lists server socket allocation information + org.pentaho.di.www.ListServerSocketServlet + + + Sniff test a transformation step + org.pentaho.di.www.SniffStepServlet + + + Execute (prepare and start) a specific transformation and pass output to the servlet + org.pentaho.di.www.ExecuteTransServlet + + + Generate a PNG image of a transformation + org.pentaho.di.www.GetTransImageServlet + + + Run a transformation directly from a repository + org.pentaho.di.www.RunTransServlet + - Start a job org.pentaho.di.www.StartJobServlet - Stop a job org.pentaho.di.www.StopJobServlet - Get the status of a job org.pentaho.di.www.GetJobStatusServlet - Add a job to the server org.pentaho.di.www.AddJobServlet - Remove a job from the server org.pentaho.di.www.RemoveJobServlet - Run a job directly from a repository org.pentaho.di.www.RunJobServlet - Execute (prepare and start) a specific job org.pentaho.di.www.ExecuteJobServlet - Generate a PNG image of a job org.pentaho.di.www.GetJobImageServlet - - Add a job to the server org.pentaho.di.www.RegisterJobServlet - Add a transformation to the server org.pentaho.di.www.RegisterTransServlet - Upload a resources export file org.pentaho.di.www.RegisterPackageServlet + + Start a job + org.pentaho.di.www.StartJobServlet + + + Stop a job + org.pentaho.di.www.StopJobServlet + + + Get the status of a job + org.pentaho.di.www.GetJobStatusServlet + + + Add a job to the server + org.pentaho.di.www.AddJobServlet + + + Remove a job from the server + org.pentaho.di.www.RemoveJobServlet + + + Run a job directly from a repository + org.pentaho.di.www.RunJobServlet + + + Execute (prepare and start) a specific job + org.pentaho.di.www.ExecuteJobServlet + + + Generate a PNG image of a job + org.pentaho.di.www.GetJobImageServlet + + + + Add a job to the server + org.pentaho.di.www.RegisterJobServlet + + + Add a transformation to the server + 
org.pentaho.di.www.RegisterTransServlet + + + Upload a resources export file + org.pentaho.di.www.RegisterPackageServlet + - Register a slave server org.pentaho.di.www.RegisterSlaveServlet - List all registered slave servers org.pentaho.di.www.GetSlavesServlet - Stop Carte Server org.pentaho.di.www.StopCarteServlet - Get properties from kettle.properties org.pentaho.di.www.GetPropertiesServlet + + Register a slave server + org.pentaho.di.www.RegisterSlaveServlet + + + List all registered slave servers + org.pentaho.di.www.GetSlavesServlet + + + Stop Carte Server + org.pentaho.di.www.StopCarteServlet + + + Get properties from kettle.properties + org.pentaho.di.www.GetPropertiesServlet + - Upload a resources export file org.pentaho.di.www.AddExportServlet + + Upload a resources export file + org.pentaho.di.www.AddExportServlet + - Get the next block of values for a sequence org.pentaho.di.www.NextSequenceValueServlet + + Get the next block of values for a sequence + org.pentaho.di.www.NextSequenceValueServlet + diff --git a/pentaho-platform/pom.xml b/pentaho-platform/pom.xml index 9c16b59..fc1323c 100644 --- a/pentaho-platform/pom.xml +++ b/pentaho-platform/pom.xml @@ -1,4 +1,5 @@ - + 4.0.0 com.github.zhicwu diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java index 12a781f..b6bc6cc 100644 --- a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java @@ -17,15 +17,6 @@ package org.pentaho.platform.scheduler2.quartz; -import java.io.OutputStream; -import java.io.Serializable; -import java.text.MessageFormat; -import java.util.Date; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.concurrent.Callable; - import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -42,11 +33,7 @@ import org.pentaho.platform.api.repository2.unified.IUnifiedRepository; import org.pentaho.platform.api.repository2.unified.RepositoryFile; import org.pentaho.platform.api.repository2.unified.data.simple.SimpleRepositoryFileData; -import org.pentaho.platform.api.scheduler2.IBackgroundExecutionStreamProvider; -import org.pentaho.platform.api.scheduler2.IBlockoutManager; -import org.pentaho.platform.api.scheduler2.IJobTrigger; -import org.pentaho.platform.api.scheduler2.IScheduler; -import org.pentaho.platform.api.scheduler2.SimpleJobTrigger; +import org.pentaho.platform.api.scheduler2.*; import org.pentaho.platform.engine.core.system.PentahoSystem; import org.pentaho.platform.engine.security.SecurityHelper; import org.pentaho.platform.engine.services.solution.ActionSequenceCompatibilityFormatter; @@ -61,391 +48,400 @@ import org.quartz.JobExecutionContext; import org.quartz.JobExecutionException; +import java.io.OutputStream; +import java.io.Serializable; +import java.text.MessageFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.Callable; + /** * A Quartz job that is responsible for executing the {@link IAction} referred to in the job context. 
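ActionAdapterQuartzJob, defined in the hunk that follows, implements the org.quartz.Job callback interface: Quartz instantiates the class for each firing and calls execute() with a context whose merged JobDataMap carries the action class/id, the acting user, and the stream provider. A minimal sketch of that contract, assuming only the standard Quartz Job API; the "message" parameter key is a hypothetical example.

import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;

// Minimal Quartz job: reads its parameters from the merged JobDataMap, does its
// work, and wraps any failure in a JobExecutionException so the scheduler sees it,
// just as the patch rethrows all failures as LoggingJobExecutionException.
public class EchoQuartzJob implements Job {

    @Override
    public void execute(JobExecutionContext context) throws JobExecutionException {
        JobDataMap dataMap = context.getMergedJobDataMap();
        String message = dataMap.getString("message");   // hypothetical parameter key
        try {
            System.out.println("Running job " + context.getJobDetail().getName()
                + ": " + message);
        } catch (Exception e) {
            throw new JobExecutionException(e);
        }
    }
}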
- * + * * @author aphillips */ public class ActionAdapterQuartzJob implements Job { - static final Log log = LogFactory.getLog( ActionAdapterQuartzJob.class ); - private static final long RETRY_COUNT = 6; - private static final long RETRY_SLEEP_AMOUNT = 10000; - - private String outputFilePath = null; - private Object lock = new Object(); + static final Log log = LogFactory.getLog(ActionAdapterQuartzJob.class); + private static final long RETRY_COUNT = 6; + private static final long RETRY_SLEEP_AMOUNT = 10000; - protected Class resolveClass( JobDataMap jobDataMap ) throws PluginBeanException, JobExecutionException { - String actionClass = jobDataMap.getString( QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS ); - String actionId = jobDataMap.getString( QuartzScheduler.RESERVEDMAPKEY_ACTIONID ); + private String outputFilePath = null; + private Object lock = new Object(); - Class clazz = null; + protected Class resolveClass(JobDataMap jobDataMap) throws PluginBeanException, JobExecutionException { + String actionClass = jobDataMap.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS); + String actionId = jobDataMap.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONID); - if ( StringUtils.isEmpty( actionId ) && StringUtils.isEmpty( actionClass ) ) { - throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( - "ActionAdapterQuartzJob.ERROR_0001_REQUIRED_PARAM_MISSING", //$NON-NLS-1$ - QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS, QuartzScheduler.RESERVEDMAPKEY_ACTIONID ) ); - } + Class clazz = null; - for ( int i = 0; i < RETRY_COUNT; i++ ) { - try { - if ( !StringUtils.isEmpty( actionId ) ) { - IPluginManager pluginManager = PentahoSystem.get( IPluginManager.class ); - clazz = pluginManager.loadClass( actionId ); - return clazz; - } else if ( !StringUtils.isEmpty( actionClass ) ) { - clazz = Class.forName( actionClass ); - return clazz; + if (StringUtils.isEmpty(actionId) && StringUtils.isEmpty(actionClass)) { + throw new LoggingJobExecutionException(Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0001_REQUIRED_PARAM_MISSING", //$NON-NLS-1$ + QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS, QuartzScheduler.RESERVEDMAPKEY_ACTIONID)); } - } catch ( Throwable t ) { - try { - Thread.sleep( RETRY_SLEEP_AMOUNT ); - } catch ( InterruptedException ie ) { - log.info( ie.getMessage(), ie ); + + for (int i = 0; i < RETRY_COUNT; i++) { + try { + if (!StringUtils.isEmpty(actionId)) { + IPluginManager pluginManager = PentahoSystem.get(IPluginManager.class); + clazz = pluginManager.loadClass(actionId); + return clazz; + } else if (!StringUtils.isEmpty(actionClass)) { + clazz = Class.forName(actionClass); + return clazz; + } + } catch (Throwable t) { + try { + Thread.sleep(RETRY_SLEEP_AMOUNT); + } catch (InterruptedException ie) { + log.info(ie.getMessage(), ie); + } + } } - } - } - // we have failed to locate the class for the actionClass - // and we're giving up waiting for it to become available/registered - // which can typically happen at system startup - throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( - "ActionAdapterQuartzJob.ERROR_0002_FAILED_TO_CREATE_ACTION", //$NON-NLS-1$ - StringUtils.isEmpty( actionId ) ? 
actionClass : actionId ) ); - } - - @SuppressWarnings( "unchecked" ) - public void execute( JobExecutionContext context ) throws JobExecutionException { - JobDataMap jobDataMap = context.getMergedJobDataMap(); - String actionUser = jobDataMap.getString( QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER ); - - Object bean; - Class actionClass = null; - try { - actionClass = resolveClass( jobDataMap ); - bean = actionClass.newInstance(); - } catch ( Exception e ) { - throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( - "ActionAdapterQuartzJob.ERROR_0002_FAILED_TO_CREATE_ACTION", //$NON-NLS-1$ - ( actionClass == null ) ? "unknown" : actionClass.getName() ), e ); //$NON-NLS-1$ + // we have failed to locate the class for the actionClass + // and we're giving up waiting for it to become available/registered + // which can typically happen at system startup + throw new LoggingJobExecutionException(Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0002_FAILED_TO_CREATE_ACTION", //$NON-NLS-1$ + StringUtils.isEmpty(actionId) ? actionClass : actionId)); } - if ( !( bean instanceof IAction ) ) { - throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( - "ActionAdapterQuartzJob.ERROR_0003_ACTION_WRONG_TYPE", actionClass.getName(), //$NON-NLS-1$ - IAction.class.getName() ) ); - } + @SuppressWarnings("unchecked") + public void execute(JobExecutionContext context) throws JobExecutionException { + JobDataMap jobDataMap = context.getMergedJobDataMap(); + String actionUser = jobDataMap.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER); - final IAction actionBean = (IAction) bean; + Object bean; + Class actionClass = null; + try { + actionClass = resolveClass(jobDataMap); + bean = actionClass.newInstance(); + } catch (Exception e) { + throw new LoggingJobExecutionException(Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0002_FAILED_TO_CREATE_ACTION", //$NON-NLS-1$ + (actionClass == null) ? "unknown" : actionClass.getName()), e); //$NON-NLS-1$ + } - try { - invokeAction( actionBean, actionUser, context, jobDataMap.getWrappedMap() ); + if (!(bean instanceof IAction)) { + throw new LoggingJobExecutionException(Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0003_ACTION_WRONG_TYPE", actionClass.getName(), //$NON-NLS-1$ + IAction.class.getName())); + } - } catch ( Throwable t ) { - // ensure that scheduler thread isn't blocked on lock - synchronized ( lock ) { - lock.notifyAll(); - } + final IAction actionBean = (IAction) bean; - // We should not distinguish between checked and unchecked exceptions here. 
All job execution failures - // should result in a rethrow of a quartz exception - throw new LoggingJobExecutionException( Messages.getInstance().getErrorString( - "ActionAdapterQuartzJob.ERROR_0004_ACTION_FAILED", actionBean //$NON-NLS-1$ - .getClass().getName() ), t ); - } - } - - protected void invokeAction( final IAction actionBean, final String actionUser, final JobExecutionContext context, - final Map params ) throws Exception { - - final IScheduler scheduler = PentahoSystem.getObjectFactory().get( IScheduler.class, "IScheduler2", null ); - final Map jobParams = new HashMap( params ); // shallow copy - - // remove the scheduling infrastructure properties - params.remove( QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS ); - params.remove( QuartzScheduler.RESERVEDMAPKEY_ACTIONID ); - params.remove( QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER ); - Object objsp = params.get( QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER ); - IBackgroundExecutionStreamProvider sp = null; - if ( objsp != null && IBackgroundExecutionStreamProvider.class.isAssignableFrom( objsp.getClass() ) ) { - sp = (IBackgroundExecutionStreamProvider) objsp; - } - final IBackgroundExecutionStreamProvider streamProvider = sp; - params.remove( QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER ); - params.remove( QuartzScheduler.RESERVEDMAPKEY_UIPASSPARAM ); - // The scheduled_fire_time is useful only to the blockoutAction see PDI-10171 - if ( actionBean instanceof BlockoutAction ) { - params.put( IBlockoutManager.SCHEDULED_FIRE_TIME, context.getScheduledFireTime() ); - } + try { + invokeAction(actionBean, actionUser, context, jobDataMap.getWrappedMap()); + + } catch (Throwable t) { + // ensure that scheduler thread isn't blocked on lock + synchronized (lock) { + lock.notifyAll(); + } - if ( log.isDebugEnabled() ) { - log.debug( MessageFormat.format( - "Scheduling system invoking action {0} as user {1} with params [ {2} ]", actionBean //$NON-NLS-1$ - .getClass().getName(), actionUser, QuartzScheduler.prettyPrintMap( params ) ) ); + // We should not distinguish between checked and unchecked exceptions here. 
All job execution failures + // should result in a rethrow of a quartz exception + throw new LoggingJobExecutionException(Messages.getInstance().getErrorString( + "ActionAdapterQuartzJob.ERROR_0004_ACTION_FAILED", actionBean //$NON-NLS-1$ + .getClass().getName()), t); + } } - Callable actionBeanRunner = new Callable() { + protected void invokeAction(final IAction actionBean, final String actionUser, final JobExecutionContext context, + final Map params) throws Exception { - public Boolean call() throws Exception { - LocaleHelper.setLocaleOverride( (Locale) params.get( LocaleHelper.USER_LOCALE_PARAM ) ); - // sync job params to the action bean - ActionHarness actionHarness = new ActionHarness( actionBean ); - boolean updateJob = false; + final IScheduler scheduler = PentahoSystem.getObjectFactory().get(IScheduler.class, "IScheduler2", null); + final Map jobParams = new HashMap(params); // shallow copy - final Map actionParams = new HashMap(); - actionParams.putAll( params ); - if ( streamProvider != null ) { - actionParams.put( "inputStream", streamProvider.getInputStream() ); + // remove the scheduling infrastructure properties + params.remove(QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS); + params.remove(QuartzScheduler.RESERVEDMAPKEY_ACTIONID); + params.remove(QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER); + Object objsp = params.get(QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER); + IBackgroundExecutionStreamProvider sp = null; + if (objsp != null && IBackgroundExecutionStreamProvider.class.isAssignableFrom(objsp.getClass())) { + sp = (IBackgroundExecutionStreamProvider) objsp; + } + final IBackgroundExecutionStreamProvider streamProvider = sp; + params.remove(QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER); + params.remove(QuartzScheduler.RESERVEDMAPKEY_UIPASSPARAM); + // The scheduled_fire_time is useful only to the blockoutAction see PDI-10171 + if (actionBean instanceof BlockoutAction) { + params.put(IBlockoutManager.SCHEDULED_FIRE_TIME, context.getScheduledFireTime()); } - actionHarness.setValues( actionParams, new ActionSequenceCompatibilityFormatter() ); - if ( actionBean instanceof IVarArgsAction ) { - actionParams.remove( "inputStream" ); - actionParams.remove( "outputStream" ); - ( (IVarArgsAction) actionBean ).setVarArgs( actionParams ); + if (log.isDebugEnabled()) { + log.debug(MessageFormat.format( + "Scheduling system invoking action {0} as user {1} with params [ {2} ]", actionBean //$NON-NLS-1$ + .getClass().getName(), actionUser, QuartzScheduler.prettyPrintMap(params))); } - boolean waitForFileCreated = false; - OutputStream stream = null; - - if ( streamProvider != null ) { - actionParams.remove( "inputStream" ); - if ( actionBean instanceof IStreamingAction ) { - streamProvider.setStreamingAction( (IStreamingAction) actionBean ); - } - - // BISERVER-9414 - validate that output path still exist - SchedulerOutputPathResolver resolver = - new SchedulerOutputPathResolver( streamProvider.getOutputPath(), actionUser ); - String outputPath = resolver.resolveOutputFilePath(); - actionParams.put( "useJcr", Boolean.TRUE ); - actionParams.put( "jcrOutputPath", outputPath.substring( 0, outputPath.lastIndexOf( "/" ) ) ); - - if ( !outputPath.equals( streamProvider.getOutputPath() ) ) { - streamProvider.setOutputFilePath( outputPath ); // set fallback path - updateJob = true; // job needs to be deleted and recreated with the new output path - } - - stream = streamProvider.getOutputStream(); - if ( stream instanceof ISourcesStreamEvents ) { - ( (ISourcesStreamEvents) stream ).addListener( 
new IStreamListener() { - public void fileCreated( final String filePath ) { - synchronized ( lock ) { - outputFilePath = filePath; - lock.notifyAll(); + Callable actionBeanRunner = new Callable() { + + public Boolean call() throws Exception { + LocaleHelper.setLocaleOverride((Locale) params.get(LocaleHelper.USER_LOCALE_PARAM)); + // sync job params to the action bean + ActionHarness actionHarness = new ActionHarness(actionBean); + boolean updateJob = false; + + final Map actionParams = new HashMap(); + actionParams.putAll(params); + if (streamProvider != null) { + actionParams.put("inputStream", streamProvider.getInputStream()); } - } - } ); - waitForFileCreated = true; - } - actionParams.put( "outputStream", stream ); - // The lineage_id is only useful for the metadata and not needed at this level see PDI-10171 - actionParams.remove( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID ); - actionHarness.setValues( actionParams ); - } + actionHarness.setValues(actionParams, new ActionSequenceCompatibilityFormatter()); - actionBean.execute(); + if (actionBean instanceof IVarArgsAction) { + actionParams.remove("inputStream"); + actionParams.remove("outputStream"); + ((IVarArgsAction) actionBean).setVarArgs(actionParams); + } - if ( stream != null ) { - IOUtils.closeQuietly( stream ); - } + boolean waitForFileCreated = false; + OutputStream stream = null; + + if (streamProvider != null) { + actionParams.remove("inputStream"); + if (actionBean instanceof IStreamingAction) { + streamProvider.setStreamingAction((IStreamingAction) actionBean); + } + + // BISERVER-9414 - validate that output path still exist + SchedulerOutputPathResolver resolver = + new SchedulerOutputPathResolver(streamProvider.getOutputPath(), actionUser); + String outputPath = resolver.resolveOutputFilePath(); + actionParams.put("useJcr", Boolean.TRUE); + actionParams.put("jcrOutputPath", outputPath.substring(0, outputPath.lastIndexOf("/"))); + + if (!outputPath.equals(streamProvider.getOutputPath())) { + streamProvider.setOutputFilePath(outputPath); // set fallback path + updateJob = true; // job needs to be deleted and recreated with the new output path + } + + stream = streamProvider.getOutputStream(); + if (stream instanceof ISourcesStreamEvents) { + ((ISourcesStreamEvents) stream).addListener(new IStreamListener() { + public void fileCreated(final String filePath) { + synchronized (lock) { + outputFilePath = filePath; + lock.notifyAll(); + } + } + }); + waitForFileCreated = true; + } + actionParams.put("outputStream", stream); + // The lineage_id is only useful for the metadata and not needed at this level see PDI-10171 + actionParams.remove(QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID); + actionHarness.setValues(actionParams); + } - if ( waitForFileCreated ) { - synchronized ( lock ) { - if ( outputFilePath == null ) { - lock.wait(); + actionBean.execute(); + + if (stream != null) { + IOUtils.closeQuietly(stream); + } + + if (waitForFileCreated) { + synchronized (lock) { + if (outputFilePath == null) { + lock.wait(); + } + } + sendEmail(actionParams, params, outputFilePath); + } + if (actionBean instanceof IPostProcessingAction) { + markContentAsGenerated((IPostProcessingAction) actionBean); + } + return updateJob; } - } - sendEmail( actionParams, params, outputFilePath ); - } - if ( actionBean instanceof IPostProcessingAction ) { - markContentAsGenerated( (IPostProcessingAction) actionBean ); - } - return updateJob; - } - - private void markContentAsGenerated( IPostProcessingAction actionBean ) { - IUnifiedRepository repo = 
PentahoSystem.get( IUnifiedRepository.class ); - String lineageId = (String) params.get( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID ); - for ( IContentItem contentItem : actionBean.getActionOutputContents() ) { - RepositoryFile sourceFile = repo.getFile( contentItem.getPath() ); - // add metadata iof we have access and we have file - if ( sourceFile != null ) { - Map metadata = repo.getFileMetadata( sourceFile.getId() ); - metadata.put( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID, lineageId ); - repo.setFileMetadata( sourceFile.getId(), metadata ); - } - } - } - }; - - boolean requiresUpdate = false; - if ( ( actionUser == null ) || ( actionUser.equals( "system session" ) ) ) { //$NON-NLS-1$ - // For now, don't try to run quartz jobs as authenticated if the user - // that created the job is a system user. See PPP-2350 - requiresUpdate = SecurityHelper.getInstance().runAsAnonymous( actionBeanRunner ); - } else { - try { - requiresUpdate = SecurityHelper.getInstance().runAsUser( actionUser, actionBeanRunner ); - } catch ( Throwable t ) { - Object restartFlag = jobParams.get( QuartzScheduler.RESERVEDMAPKEY_RESTART_FLAG ); - if ( restartFlag == null ) { - final SimpleJobTrigger trigger = new SimpleJobTrigger( new Date(), null, 0, 0 ); - final Class iaction = (Class) actionBean.getClass(); - // recreate the job in the context of the original creator - SecurityHelper.getInstance().runAsUser( actionUser, new Callable() { - @Override - public Void call() throws Exception { - if ( streamProvider != null ) { - streamProvider.setStreamingAction( null ); // remove generated content - } - QuartzJobKey jobKey = QuartzJobKey.parse( context.getJobDetail().getName() ); - String jobName = jobKey.getJobName(); - jobParams.put( QuartzScheduler.RESERVEDMAPKEY_RESTART_FLAG, Boolean.TRUE ); - scheduler.createJob( jobName, iaction, jobParams, trigger, streamProvider ); - log.warn( "New RunOnce job created for " + jobName + " -> possible startup synchronization error" ); - return null; + + private void markContentAsGenerated(IPostProcessingAction actionBean) { + IUnifiedRepository repo = PentahoSystem.get(IUnifiedRepository.class); + String lineageId = (String) params.get(QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID); + for (IContentItem contentItem : actionBean.getActionOutputContents()) { + RepositoryFile sourceFile = repo.getFile(contentItem.getPath()); + // add metadata iof we have access and we have file + if (sourceFile != null) { + Map metadata = repo.getFileMetadata(sourceFile.getId()); + metadata.put(QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID, lineageId); + repo.setFileMetadata(sourceFile.getId(), metadata); + } + } } - } ); + }; + + boolean requiresUpdate = false; + if ((actionUser == null) || (actionUser.equals("system session"))) { //$NON-NLS-1$ + // For now, don't try to run quartz jobs as authenticated if the user + // that created the job is a system user. 
See PPP-2350 + requiresUpdate = SecurityHelper.getInstance().runAsAnonymous(actionBeanRunner); } else { - log.warn( "RunOnce already created, skipping" ); - throw new Exception( t ); + try { + requiresUpdate = SecurityHelper.getInstance().runAsUser(actionUser, actionBeanRunner); + } catch (Throwable t) { + Object restartFlag = jobParams.get(QuartzScheduler.RESERVEDMAPKEY_RESTART_FLAG); + if (restartFlag == null) { + final SimpleJobTrigger trigger = new SimpleJobTrigger(new Date(), null, 0, 0); + final Class iaction = (Class) actionBean.getClass(); + // recreate the job in the context of the original creator + SecurityHelper.getInstance().runAsUser(actionUser, new Callable() { + @Override + public Void call() throws Exception { + if (streamProvider != null) { + streamProvider.setStreamingAction(null); // remove generated content + } + QuartzJobKey jobKey = QuartzJobKey.parse(context.getJobDetail().getName()); + String jobName = jobKey.getJobName(); + jobParams.put(QuartzScheduler.RESERVEDMAPKEY_RESTART_FLAG, Boolean.TRUE); + scheduler.createJob(jobName, iaction, jobParams, trigger, streamProvider); + log.warn("New RunOnce job created for " + jobName + " -> possible startup synchronization error"); + return null; + } + }); + } else { + log.warn("RunOnce already created, skipping"); + throw new Exception(t); + } + } } - } - } - scheduler.fireJobCompleted( actionBean, actionUser, params, streamProvider ); - - if ( requiresUpdate ) { - log.warn( "Output path for job: " + context.getJobDetail().getName() + " has changed. Job requires update" ); - try { - final IJobTrigger trigger = scheduler.getJob( context.getJobDetail().getName() ).getJobTrigger(); - final Class iaction = (Class) actionBean.getClass(); - - // remove job with outdated/invalid output path - scheduler.removeJob( context.getJobDetail().getName() ); - - // recreate the job in the context of the original creator - SecurityHelper.getInstance().runAsUser( actionUser, new Callable() { - @Override - public Void call() throws Exception { - streamProvider.setStreamingAction( null ); // remove generated content - QuartzJobKey jobKey = QuartzJobKey.parse( context.getJobDetail().getName() ); - String jobName = jobKey.getJobName(); - org.pentaho.platform.api.scheduler2.Job j = - scheduler.createJob( jobName, iaction, jobParams, trigger, streamProvider ); - log.warn( "New Job: " + j.getJobId() + " created" ); - return null; - } - } ); - } catch ( Exception e ) { - log.error( e.getMessage(), e ); - } - } + scheduler.fireJobCompleted(actionBean, actionUser, params, streamProvider); + + if (requiresUpdate) { + log.warn("Output path for job: " + context.getJobDetail().getName() + " has changed. 
Job requires update"); + try { + final IJobTrigger trigger = scheduler.getJob(context.getJobDetail().getName()).getJobTrigger(); + final Class iaction = (Class) actionBean.getClass(); + + // remove job with outdated/invalid output path + scheduler.removeJob(context.getJobDetail().getName()); + + // recreate the job in the context of the original creator + SecurityHelper.getInstance().runAsUser(actionUser, new Callable() { + @Override + public Void call() throws Exception { + streamProvider.setStreamingAction(null); // remove generated content + QuartzJobKey jobKey = QuartzJobKey.parse(context.getJobDetail().getName()); + String jobName = jobKey.getJobName(); + org.pentaho.platform.api.scheduler2.Job j = + scheduler.createJob(jobName, iaction, jobParams, trigger, streamProvider); + log.warn("New Job: " + j.getJobId() + " created"); + return null; + } + }); + } catch (Exception e) { + log.error(e.getMessage(), e); + } + } + + if (log.isDebugEnabled()) { + log.debug(MessageFormat.format( + "Scheduling system successfully invoked action {0} as user {1} with params [ {2} ]", actionBean //$NON-NLS-1$ + .getClass().getName(), actionUser, QuartzScheduler.prettyPrintMap(params))); + } - if ( log.isDebugEnabled() ) { - log.debug( MessageFormat.format( - "Scheduling system successfully invoked action {0} as user {1} with params [ {2} ]", actionBean //$NON-NLS-1$ - .getClass().getName(), actionUser, QuartzScheduler.prettyPrintMap( params ) ) ); } - } - - private void sendEmail( Map actionParams, Map params, String filePath ) { - try { - IUnifiedRepository repo = PentahoSystem.get( IUnifiedRepository.class ); - RepositoryFile sourceFile = repo.getFile( filePath ); - // add metadata - Map metadata = repo.getFileMetadata( sourceFile.getId() ); - String lineageId = (String) params.get( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID ); - metadata.put( QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID, lineageId ); - repo.setFileMetadata( sourceFile.getId(), metadata ); - // send email - SimpleRepositoryFileData data = repo.getDataForRead( sourceFile.getId(), SimpleRepositoryFileData.class ); - // if email is setup and we have tos, then do it - Emailer emailer = new Emailer(); - if ( !emailer.setup() ) { - // email not configured - return; - } - String to = (String) actionParams.get( "_SCH_EMAIL_TO" ); - String cc = (String) actionParams.get( "_SCH_EMAIL_CC" ); - String bcc = (String) actionParams.get( "_SCH_EMAIL_BCC" ); - if ( ( to == null || "".equals( to ) ) && ( cc == null || "".equals( cc ) ) - && ( bcc == null || "".equals( bcc ) ) ) { - // no destination - return; - } - emailer.setTo( to ); - emailer.setCc( cc ); - emailer.setBcc( bcc ); - emailer.setAttachment( data.getInputStream() ); - emailer.setAttachmentName( "attachment" ); - String attachmentName = (String) actionParams.get( "_SCH_EMAIL_ATTACHMENT_NAME" ); - if ( attachmentName != null && !"".equals( attachmentName ) ) { - String path = filePath; - if ( path.endsWith( ".*" ) ) { - path = path.replace( ".*", "" ); - } - String extension = MimeHelper.getExtension( data.getMimeType() ); - if ( extension == null ) { - extension = ".bin"; - } - if ( !attachmentName.endsWith( extension ) ) { - emailer.setAttachmentName( attachmentName + extension ); - } else { - emailer.setAttachmentName( attachmentName ); - } - } else if ( data != null ) { - String path = filePath; - if ( path.endsWith( ".*" ) ) { - path = path.replace( ".*", "" ); - } - String extension = MimeHelper.getExtension( data.getMimeType() ); - if ( extension == null ) { - extension = ".bin"; - } - 
path = path.substring( path.lastIndexOf( "/" ) + 1, path.length() ); - if ( !path.endsWith( extension ) ) { - emailer.setAttachmentName( path + extension ); - } else { - emailer.setAttachmentName( path ); + private void sendEmail(Map actionParams, Map params, String filePath) { + try { + IUnifiedRepository repo = PentahoSystem.get(IUnifiedRepository.class); + RepositoryFile sourceFile = repo.getFile(filePath); + // add metadata + Map metadata = repo.getFileMetadata(sourceFile.getId()); + String lineageId = (String) params.get(QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID); + metadata.put(QuartzScheduler.RESERVEDMAPKEY_LINEAGE_ID, lineageId); + repo.setFileMetadata(sourceFile.getId(), metadata); + // send email + SimpleRepositoryFileData data = repo.getDataForRead(sourceFile.getId(), SimpleRepositoryFileData.class); + // if email is setup and we have tos, then do it + Emailer emailer = new Emailer(); + if (!emailer.setup()) { + // email not configured + return; + } + String to = (String) actionParams.get("_SCH_EMAIL_TO"); + String cc = (String) actionParams.get("_SCH_EMAIL_CC"); + String bcc = (String) actionParams.get("_SCH_EMAIL_BCC"); + if ((to == null || "".equals(to)) && (cc == null || "".equals(cc)) + && (bcc == null || "".equals(bcc))) { + // no destination + return; + } + emailer.setTo(to); + emailer.setCc(cc); + emailer.setBcc(bcc); + emailer.setAttachment(data.getInputStream()); + emailer.setAttachmentName("attachment"); + String attachmentName = (String) actionParams.get("_SCH_EMAIL_ATTACHMENT_NAME"); + if (attachmentName != null && !"".equals(attachmentName)) { + String path = filePath; + if (path.endsWith(".*")) { + path = path.replace(".*", ""); + } + String extension = MimeHelper.getExtension(data.getMimeType()); + if (extension == null) { + extension = ".bin"; + } + if (!attachmentName.endsWith(extension)) { + emailer.setAttachmentName(attachmentName + extension); + } else { + emailer.setAttachmentName(attachmentName); + } + } else if (data != null) { + String path = filePath; + if (path.endsWith(".*")) { + path = path.replace(".*", ""); + } + String extension = MimeHelper.getExtension(data.getMimeType()); + if (extension == null) { + extension = ".bin"; + } + path = path.substring(path.lastIndexOf("/") + 1, path.length()); + if (!path.endsWith(extension)) { + emailer.setAttachmentName(path + extension); + } else { + emailer.setAttachmentName(path); + } + } + if (data == null || data.getMimeType() == null || "".equals(data.getMimeType())) { + emailer.setAttachmentMimeType("binary/octet-stream"); + } else { + emailer.setAttachmentMimeType(data.getMimeType()); + } + String subject = (String) actionParams.get("_SCH_EMAIL_SUBJECT"); + if (subject != null && !"".equals(subject)) { + emailer.setSubject(subject); + } else { + emailer.setSubject("Pentaho Scheduler: " + emailer.getAttachmentName()); + } + String message = (String) actionParams.get("_SCH_EMAIL_MESSAGE"); + if (subject != null && !"".equals(subject)) { + emailer.setBody(message); + } + emailer.send(); + } catch (Exception e) { + log.warn(e.getMessage(), e); } - } - if ( data == null || data.getMimeType() == null || "".equals( data.getMimeType() ) ) { - emailer.setAttachmentMimeType( "binary/octet-stream" ); - } else { - emailer.setAttachmentMimeType( data.getMimeType() ); - } - String subject = (String) actionParams.get( "_SCH_EMAIL_SUBJECT" ); - if ( subject != null && !"".equals( subject ) ) { - emailer.setSubject( subject ); - } else { - emailer.setSubject( "Pentaho Scheduler: " + emailer.getAttachmentName() ); - } - 
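The sendEmail block above derives the mail attachment's file name from the scheduled output path and from the MIME type of the generated file. A minimal sketch of that naming rule in plain Java, with the MimeHelper lookup replaced by a caller-supplied extension (class and method names here are illustrative, not Pentaho APIs):

    // Illustrative sketch of the naming rule above, not code from the patch.
    public class AttachmentNameSketch {
        static String attachmentName(String filePath, String mimeExtension) {
            // Strip a trailing ".*" placeholder, keep only the file name, and append
            // the MIME-derived extension (".bin" when unknown) if it is missing.
            String path = filePath.endsWith(".*") ? filePath.replace(".*", "") : filePath;
            String extension = (mimeExtension == null) ? ".bin" : mimeExtension;
            String name = path.substring(path.lastIndexOf('/') + 1);
            return name.endsWith(extension) ? name : name + extension;
        }

        public static void main(String[] args) {
            System.out.println(attachmentName("/public/reports/sales.*", ".pdf")); // sales.pdf
            System.out.println(attachmentName("/public/reports/sales.*", null));   // sales.bin
        }
    }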
String message = (String) actionParams.get( "_SCH_EMAIL_MESSAGE" ); - if ( subject != null && !"".equals( subject ) ) { - emailer.setBody( message ); - } - emailer.send(); - } catch ( Exception e ) { - log.warn( e.getMessage(), e ); } - } - class LoggingJobExecutionException extends JobExecutionException { - private static final long serialVersionUID = -4124907454208034326L; + class LoggingJobExecutionException extends JobExecutionException { + private static final long serialVersionUID = -4124907454208034326L; - public LoggingJobExecutionException( String msg ) { - super( msg ); - log.error( msg ); - } + public LoggingJobExecutionException(String msg) { + super(msg); + log.error(msg); + } - public LoggingJobExecutionException( String msg, Throwable t ) { - super( msg, t ); - log.error( msg, t ); - } + public LoggingJobExecutionException(String msg, Throwable t) { + super(msg, t); + log.error(msg, t); + } - } + } } diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java index 86be52e..536c22f 100644 --- a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java @@ -22,821 +22,839 @@ import org.apache.commons.logging.LogFactory; import org.pentaho.platform.api.action.IAction; import org.pentaho.platform.api.engine.IPentahoSession; -import org.pentaho.platform.api.scheduler2.ComplexJobTrigger; -import org.pentaho.platform.api.scheduler2.IBackgroundExecutionStreamProvider; -import org.pentaho.platform.api.scheduler2.IJobFilter; -import org.pentaho.platform.api.scheduler2.IJobResult; -import org.pentaho.platform.api.scheduler2.IJobTrigger; -import org.pentaho.platform.api.scheduler2.IScheduleSubject; -import org.pentaho.platform.api.scheduler2.IScheduler; -import org.pentaho.platform.api.scheduler2.ISchedulerListener; +import org.pentaho.platform.api.scheduler2.*; import org.pentaho.platform.api.scheduler2.Job; import org.pentaho.platform.api.scheduler2.Job.JobState; -import org.pentaho.platform.api.scheduler2.JobTrigger; import org.pentaho.platform.api.scheduler2.SchedulerException; -import org.pentaho.platform.api.scheduler2.SimpleJobTrigger; import org.pentaho.platform.api.scheduler2.recur.ITimeRecurrence; import org.pentaho.platform.engine.core.system.PentahoSessionHolder; import org.pentaho.platform.engine.security.SecurityHelper; import org.pentaho.platform.scheduler2.messsages.Messages; -import org.pentaho.platform.scheduler2.recur.IncrementalRecurrence; -import org.pentaho.platform.scheduler2.recur.QualifiedDayOfMonth; -import org.pentaho.platform.scheduler2.recur.QualifiedDayOfWeek; +import org.pentaho.platform.scheduler2.recur.*; import org.pentaho.platform.scheduler2.recur.QualifiedDayOfWeek.DayOfWeek; import org.pentaho.platform.scheduler2.recur.QualifiedDayOfWeek.DayOfWeekQualifier; -import org.pentaho.platform.scheduler2.recur.RecurrenceList; -import org.pentaho.platform.scheduler2.recur.SequentialRecurrence; import org.quartz.Calendar; -import org.quartz.CronTrigger; -import org.quartz.JobDataMap; -import org.quartz.JobDetail; -import org.quartz.Scheduler; -import org.quartz.SchedulerFactory; -import org.quartz.SimpleTrigger; -import org.quartz.Trigger; +import org.quartz.*; import org.quartz.impl.StdSchedulerFactory; import java.io.Serializable; import java.security.Principal; import java.text.MessageFormat; 
import java.text.ParseException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; +import java.util.*; import java.util.regex.Pattern; /** * A Quartz implementation of {@link IScheduler} - * + * * @author aphillips */ public class QuartzScheduler implements IScheduler { - public static final String RESERVEDMAPKEY_ACTIONCLASS = "ActionAdapterQuartzJob-ActionClass"; //$NON-NLS-1$ + public static final String RESERVEDMAPKEY_ACTIONCLASS = "ActionAdapterQuartzJob-ActionClass"; //$NON-NLS-1$ - public static final String RESERVEDMAPKEY_ACTIONUSER = "ActionAdapterQuartzJob-ActionUser"; //$NON-NLS-1$ + public static final String RESERVEDMAPKEY_ACTIONUSER = "ActionAdapterQuartzJob-ActionUser"; //$NON-NLS-1$ - public static final String RESERVEDMAPKEY_ACTIONID = "ActionAdapterQuartzJob-ActionId"; //$NON-NLS-1$ + public static final String RESERVEDMAPKEY_ACTIONID = "ActionAdapterQuartzJob-ActionId"; //$NON-NLS-1$ - public static final String RESERVEDMAPKEY_STREAMPROVIDER = "ActionAdapterQuartzJob-StreamProvider"; //$NON-NLS-1$ + public static final String RESERVEDMAPKEY_STREAMPROVIDER = "ActionAdapterQuartzJob-StreamProvider"; //$NON-NLS-1$ - public static final String RESERVEDMAPKEY_UIPASSPARAM = "uiPassParam"; + public static final String RESERVEDMAPKEY_UIPASSPARAM = "uiPassParam"; - public static final String RESERVEDMAPKEY_LINEAGE_ID = "lineage-id"; + public static final String RESERVEDMAPKEY_LINEAGE_ID = "lineage-id"; - public static final String RESERVEDMAPKEY_RESTART_FLAG = "ActionAdapterQuartzJob-Restart"; + public static final String RESERVEDMAPKEY_RESTART_FLAG = "ActionAdapterQuartzJob-Restart"; - private static final Log logger = LogFactory.getLog( QuartzScheduler.class ); + private static final Log logger = LogFactory.getLog(QuartzScheduler.class); - private SchedulerFactory quartzSchedulerFactory; + private SchedulerFactory quartzSchedulerFactory; - private Scheduler quartzScheduler; + private Scheduler quartzScheduler; - private ArrayList listeners = new ArrayList(); + private ArrayList listeners = new ArrayList(); - private static final Pattern listPattern = Pattern.compile( "\\d+" ); //$NON-NLS-1$ + private static final Pattern listPattern = Pattern.compile("\\d+"); //$NON-NLS-1$ - private static final Pattern dayOfWeekRangePattern = Pattern.compile( ".*\\-.*" ); //$NON-NLS-1$ + private static final Pattern dayOfWeekRangePattern = Pattern.compile(".*\\-.*"); //$NON-NLS-1$ - private static final Pattern sequencePattern = Pattern.compile( "\\d+\\-\\d+" ); //$NON-NLS-1$ + private static final Pattern sequencePattern = Pattern.compile("\\d+\\-\\d+"); //$NON-NLS-1$ - private static final Pattern intervalPattern = Pattern.compile( "\\d+/\\d+" ); //$NON-NLS-1$ + private static final Pattern intervalPattern = Pattern.compile("\\d+/\\d+"); //$NON-NLS-1$ - private static final Pattern qualifiedDayPattern = Pattern.compile( "\\d+#\\d+" ); //$NON-NLS-1$ + private static final Pattern qualifiedDayPattern = Pattern.compile("\\d+#\\d+"); //$NON-NLS-1$ - private static final Pattern lastDayPattern = Pattern.compile( "\\d+L" ); //$NON-NLS-1$ + private static final Pattern lastDayPattern = Pattern.compile("\\d+L"); //$NON-NLS-1$ - public QuartzScheduler( SchedulerFactory schedulerFactory ) { - this.quartzSchedulerFactory = schedulerFactory; - } + public QuartzScheduler(SchedulerFactory schedulerFactory) { + this.quartzSchedulerFactory = schedulerFactory; + } - public 
QuartzScheduler() { - this.quartzSchedulerFactory = new StdSchedulerFactory(); - } + public QuartzScheduler() { + this.quartzSchedulerFactory = new StdSchedulerFactory(); + } - /** - * Overrides the default Quartz {@link SchedulerFactory}. Note: depending on the type of scheduler you are setting - * here, there may be initializing required prior to this setter being called. Only the - * {@link SchedulerFactory#getScheduler()} will be called later, so the factory set here must already be in a state - * where that invocation will be successful. - * - * @param quartzSchedulerFactory - * the quartz factory to use for generating scheduler instances - */ - public void setQuartzSchedulerFactory( SchedulerFactory quartzSchedulerFactory ) throws SchedulerException { - this.quartzSchedulerFactory = quartzSchedulerFactory; - if( quartzScheduler != null ){ - this.shutdown(); - quartzScheduler = null; + /** + * Overrides the default Quartz {@link SchedulerFactory}. Note: depending on the type of scheduler you are setting + * here, there may be initializing required prior to this setter being called. Only the + * {@link SchedulerFactory#getScheduler()} will be called later, so the factory set here must already be in a state + * where that invocation will be successful. + * + * @param quartzSchedulerFactory the quartz factory to use for generating scheduler instances + */ + public void setQuartzSchedulerFactory(SchedulerFactory quartzSchedulerFactory) throws SchedulerException { + this.quartzSchedulerFactory = quartzSchedulerFactory; + if (quartzScheduler != null) { + this.shutdown(); + quartzScheduler = null; + } } - } - public Scheduler getQuartzScheduler() throws org.quartz.SchedulerException { - if ( quartzScheduler == null ) { + public Scheduler getQuartzScheduler() throws org.quartz.SchedulerException { + if (quartzScheduler == null) { /* * Currently, quartz will always give you the same scheduler object when any factory instance is asked for a * scheduler. In other words there is no such thing as scheduler-level isolation. If we really need multiple * isolated scheduler instances, we should investigate named schedulers, but this API getScheduler() will not help * us in that regard. 
*/ - quartzScheduler = quartzSchedulerFactory.getScheduler(); - } - - logger.debug( "Using quartz scheduler " + quartzScheduler ); //$NON-NLS-1$ - return quartzScheduler; - } - - private void setQuartzScheduler( Scheduler quartzScheduler ) { - this.quartzScheduler = quartzScheduler; - } - - /** {@inheritDoc} */ - public Job createJob( String jobName, String actionId, Map jobParams, IJobTrigger trigger ) - throws SchedulerException { - return createJob( jobName, actionId, jobParams, trigger, null ); - } - - /** {@inheritDoc} */ - public Job createJob( String jobName, Class action, Map jobParams, - IJobTrigger trigger ) throws SchedulerException { - return createJob( jobName, action, jobParams, trigger, null ); - } - - /** {@inheritDoc} */ - public Job createJob( String jobName, Class action, Map jobParams, - IJobTrigger trigger, IBackgroundExecutionStreamProvider outputStreamProvider ) throws SchedulerException { - - if ( action == null ) { - throw new SchedulerException( Messages.getInstance().getString( "QuartzScheduler.ERROR_0003_ACTION_IS_NULL" ) ); //$NON-NLS-1$ - } - - if ( jobParams == null ) { - jobParams = new HashMap(); - } - - jobParams.put( RESERVEDMAPKEY_ACTIONCLASS, action.getName() ); - Job ret = createJob( jobName, jobParams, trigger, outputStreamProvider ); - ret.setSchedulableClass( action.getName() ); - return ret; - } - - /** {@inheritDoc} */ - public Job createJob( String jobName, String actionId, Map jobParams, IJobTrigger trigger, - IBackgroundExecutionStreamProvider outputStreamProvider ) throws SchedulerException { - if ( StringUtils.isEmpty( actionId ) ) { - throw new SchedulerException( Messages.getInstance().getString( "QuartzScheduler.ERROR_0003_ACTION_IS_NULL" ) ); //$NON-NLS-1$ + quartzScheduler = quartzSchedulerFactory.getScheduler(); + } + + logger.debug("Using quartz scheduler " + quartzScheduler); //$NON-NLS-1$ + return quartzScheduler; + } + + private void setQuartzScheduler(Scheduler quartzScheduler) { + this.quartzScheduler = quartzScheduler; } - if ( jobParams == null ) { - jobParams = new HashMap(); + /** + * {@inheritDoc} + */ + public Job createJob(String jobName, String actionId, Map jobParams, IJobTrigger trigger) + throws SchedulerException { + return createJob(jobName, actionId, jobParams, trigger, null); + } + + /** + * {@inheritDoc} + */ + public Job createJob(String jobName, Class action, Map jobParams, + IJobTrigger trigger) throws SchedulerException { + return createJob(jobName, action, jobParams, trigger, null); + } + + /** + * {@inheritDoc} + */ + public Job createJob(String jobName, Class action, Map jobParams, + IJobTrigger trigger, IBackgroundExecutionStreamProvider outputStreamProvider) throws SchedulerException { + + if (action == null) { + throw new SchedulerException(Messages.getInstance().getString("QuartzScheduler.ERROR_0003_ACTION_IS_NULL")); //$NON-NLS-1$ + } + + if (jobParams == null) { + jobParams = new HashMap(); + } + + jobParams.put(RESERVEDMAPKEY_ACTIONCLASS, action.getName()); + Job ret = createJob(jobName, jobParams, trigger, outputStreamProvider); + ret.setSchedulableClass(action.getName()); + return ret; } - - jobParams.put( RESERVEDMAPKEY_ACTIONID, actionId ); - Job ret = createJob( jobName, jobParams, trigger, outputStreamProvider ); - ret.setSchedulableClass( "" ); //$NON-NLS-1$ - return ret; - } - - public static Trigger createQuartzTrigger( IJobTrigger jobTrigger, QuartzJobKey jobId ) throws SchedulerException { - Trigger quartzTrigger = null; - if ( jobTrigger instanceof ComplexJobTrigger ) { - try { - 
quartzTrigger = - new CronTrigger( jobId.toString(), jobId.getUserName(), jobTrigger.getCronString() != null ? jobTrigger - .getCronString() : QuartzCronStringFactory.createCronString( (ComplexJobTrigger) jobTrigger ) ); - } catch ( ParseException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobId.getJobName() ), e ); //$NON-NLS-1$ - } - } else if ( jobTrigger instanceof SimpleJobTrigger ) { - SimpleJobTrigger simpleTrigger = (SimpleJobTrigger) jobTrigger; - long interval = simpleTrigger.getRepeatInterval(); - if ( interval > 0 ) { - interval *= 1000; - } - int repeatCount = - simpleTrigger.getRepeatCount() < 0 ? SimpleTrigger.REPEAT_INDEFINITELY : simpleTrigger.getRepeatCount(); - quartzTrigger = - new SimpleTrigger( jobId.toString(), jobId.getUserName(), simpleTrigger.getStartTime(), simpleTrigger - .getEndTime(), repeatCount, interval ); - } else { - throw new SchedulerException( Messages.getInstance().getString( "QuartzScheduler.ERROR_0002_TRIGGER_WRONG_TYPE" ) ); //$NON-NLS-1$ - } - if ( quartzTrigger instanceof SimpleTrigger ) { - quartzTrigger.setMisfireInstruction( SimpleTrigger.MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT ); - } else { - quartzTrigger.setMisfireInstruction( SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW ); - } - return quartzTrigger; - } - - private JobDetail createJobDetails( QuartzJobKey jobId, Map jobParams ) { - JobDetail jobDetail = new JobDetail( jobId.toString(), jobId.getUserName(), BlockingQuartzJob.class ); - jobParams.put( RESERVEDMAPKEY_ACTIONUSER, jobId.getUserName() ); - JobDataMap jobDataMap = new JobDataMap( jobParams ); - jobDetail.setJobDataMap( jobDataMap ); - return jobDetail; - } - - private Calendar createQuartzCalendar( ComplexJobTrigger complexJobTrigger ) { - Calendar triggerCalendar = null; - if ( ( complexJobTrigger.getStartTime() != null ) || ( complexJobTrigger.getEndTime() != null ) ) { - triggerCalendar = - new QuartzSchedulerAvailability( complexJobTrigger.getStartTime(), complexJobTrigger.getEndTime() ); - } - return triggerCalendar; - } - - /** {@inheritDoc} */ - protected Job createJob( String jobName, Map jobParams, IJobTrigger trigger, - IBackgroundExecutionStreamProvider outputStreamProvider ) throws SchedulerException { - - String curUser = getCurrentUser(); - - // determine if the job params tell us who owns the job - Serializable jobOwner = jobParams.get( RESERVEDMAPKEY_ACTIONUSER ); - if ( jobOwner != null && jobOwner.toString().length() > 0 ) { - curUser = jobOwner.toString(); - } - - QuartzJobKey jobId = new QuartzJobKey( jobName, curUser ); - - Trigger quartzTrigger = createQuartzTrigger( trigger, jobId ); - - if( trigger.getEndTime() != null ){ - quartzTrigger.setEndTime( trigger.getEndTime() ); - } - - Calendar triggerCalendar = - quartzTrigger instanceof CronTrigger ? 
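createQuartzTrigger, shown above in its original and re-indented form, maps the platform's IJobTrigger onto Quartz 1.x trigger classes: a ComplexJobTrigger becomes a CronTrigger built from a cron string, and a SimpleJobTrigger becomes a SimpleTrigger whose repeat interval is converted from seconds to milliseconds before it is handed to Quartz. A small sketch using the same Quartz 1.x constructor the patch uses; the trigger name, group and repeat count are placeholder values:

    import java.util.Date;
    import org.quartz.SimpleTrigger;

    public class TriggerSketch {
        public static void main(String[] args) {
            long intervalSeconds = 30;                        // platform-side interval, in seconds
            SimpleTrigger trigger = new SimpleTrigger(
                    "someJob:admin:12345", "admin",           // placeholder trigger name and group
                    new Date(), null,                         // start now, no end time
                    10,                                       // repeat ten more times
                    intervalSeconds * 1000);                  // Quartz expects milliseconds
            trigger.setMisfireInstruction(
                    SimpleTrigger.MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT);
            System.out.println(trigger.getFullName() + " fires every "
                    + trigger.getRepeatInterval() + " ms");
        }
    }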
createQuartzCalendar( (ComplexJobTrigger) trigger ) : null; - - if ( outputStreamProvider != null ) { - jobParams.put( RESERVEDMAPKEY_STREAMPROVIDER, outputStreamProvider ); - } - - if ( trigger.getUiPassParam() != null ) { - jobParams.put( RESERVEDMAPKEY_UIPASSPARAM, trigger.getUiPassParam() ); - } - - if ( !jobParams.containsKey( RESERVEDMAPKEY_LINEAGE_ID ) ) { - String uuid = UUID.randomUUID().toString(); - jobParams.put( RESERVEDMAPKEY_LINEAGE_ID, uuid ); - } - - JobDetail jobDetail = createJobDetails( jobId, jobParams ); - - try { - Scheduler scheduler = getQuartzScheduler(); - if ( triggerCalendar != null ) { - scheduler.addCalendar( jobId.toString(), triggerCalendar, false, false ); - quartzTrigger.setCalendarName( jobId.toString() ); - } - logger - .debug( MessageFormat - .format( - "Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap( jobParams ) ) ); //$NON-NLS-1$ - scheduler.scheduleJob( jobDetail, quartzTrigger ); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobName ), e ); //$NON-NLS-1$ - } - - Job job = new Job(); - job.setJobParams( jobParams ); - job.setJobTrigger( (JobTrigger) trigger ); - job.setNextRun( quartzTrigger.getNextFireTime() ); - job.setLastRun( quartzTrigger.getPreviousFireTime() ); - job.setJobId( jobId.toString() ); - job.setJobName( jobName ); - job.setUserName( curUser ); - job.setState( JobState.NORMAL ); - - return job; - } - - @Override - public void updateJob( String jobId, Map jobParams, IJobTrigger trigger ) - throws SchedulerException { - QuartzJobKey jobKey = QuartzJobKey.parse( jobId ); - - Trigger quartzTrigger = createQuartzTrigger( trigger, jobKey ); - quartzTrigger.setJobName( jobId ); - quartzTrigger.setJobGroup( jobKey.getUserName() ); - - Calendar triggerCalendar = - quartzTrigger instanceof CronTrigger ? 
createQuartzCalendar( (ComplexJobTrigger) trigger ) : null; - - try { - Scheduler scheduler = getQuartzScheduler(); - // int triggerState = scheduler.getTriggerState(jobId, jobKey.getUserName()); - // if (triggerState != Trigger.STATE_PAUSED) { - // scheduler.pauseTrigger(jobId, jobKey.getUserName()); - // } - JobDetail origJobDetail = scheduler.getJobDetail( jobId, jobKey.getUserName() ); - if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_ACTIONCLASS ) ) { - jobParams.put( RESERVEDMAPKEY_ACTIONCLASS, origJobDetail.getJobDataMap().get( RESERVEDMAPKEY_ACTIONCLASS ) - .toString() ); - } else if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_ACTIONID ) ) { - jobParams - .put( RESERVEDMAPKEY_ACTIONID, origJobDetail.getJobDataMap().get( RESERVEDMAPKEY_ACTIONID ).toString() ); - } - - if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_STREAMPROVIDER ) ) { - jobParams.put( RESERVEDMAPKEY_STREAMPROVIDER, (Serializable) origJobDetail.getJobDataMap().get( - RESERVEDMAPKEY_STREAMPROVIDER ) ); - } - if ( origJobDetail.getJobDataMap().containsKey( RESERVEDMAPKEY_UIPASSPARAM ) ) { - jobParams.put( RESERVEDMAPKEY_UIPASSPARAM, (Serializable) origJobDetail.getJobDataMap().get( - RESERVEDMAPKEY_UIPASSPARAM ) ); - } - - JobDetail jobDetail = createJobDetails( jobKey, jobParams ); - scheduler.addJob( jobDetail, true ); - if ( triggerCalendar != null ) { - scheduler.addCalendar( jobId.toString(), triggerCalendar, true, true ); - quartzTrigger.setCalendarName( jobId.toString() ); - } - scheduler.rescheduleJob( jobId, jobKey.getUserName(), quartzTrigger ); - // if (triggerState != Trigger.STATE_PAUSED) { - // scheduler.resumeTrigger(jobId, jobKey.getUserName()); - // } - logger - .debug( MessageFormat - .format( - "Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap( jobParams ) ) ); //$NON-NLS-1$ - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobKey.getJobName() ), e ); //$NON-NLS-1$ - } - } - - /** {@inheritDoc} */ - public Map getAvailabilityWindows() { - // TODO Auto-generated method stub - return null; - } - - /** {@inheritDoc} */ - public List getJobHistory( String jobId ) { - // TODO Auto-generated method stub - return null; - } - - /** {@inheritDoc} */ - public void triggerNow( String jobId ) throws SchedulerException { - try { - QuartzJobKey jobKey = QuartzJobKey.parse( jobId ); - Scheduler scheduler = getQuartzScheduler(); - String groupName = jobKey.getUserName(); - for ( Trigger trigger : scheduler.getTriggersOfJob( jobId, groupName ) ) { - if ( "MANUAL_TRIGGER".equals( trigger.getGroup() ) ) { - continue; - } - if ( trigger instanceof SimpleTrigger ) { - ( (SimpleTrigger) trigger ).setPreviousFireTime( new Date() ); - } else if ( trigger instanceof CronTrigger ) { - ( (CronTrigger) trigger ).setPreviousFireTime( new Date() ); - } - // force the trigger to be updated with the previous fire time - scheduler.rescheduleJob( jobId, jobKey.getUserName(), trigger ); - } - - scheduler.triggerJob( jobId, jobKey.getUserName() ); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId ), e ); //$NON-NLS-1$ - } - } - - /** {@inheritDoc} */ - @SuppressWarnings( "unchecked" ) - public Job getJob( String jobId ) throws SchedulerException { - try { - Scheduler scheduler = 
getQuartzScheduler(); - QuartzJobKey jobKey = QuartzJobKey.parse( jobId ); - String groupName = jobKey.getUserName(); - for ( Trigger trigger : scheduler.getTriggersOfJob( jobId, groupName ) ) { + + /** + * {@inheritDoc} + */ + public Job createJob(String jobName, String actionId, Map jobParams, IJobTrigger trigger, + IBackgroundExecutionStreamProvider outputStreamProvider) throws SchedulerException { + if (StringUtils.isEmpty(actionId)) { + throw new SchedulerException(Messages.getInstance().getString("QuartzScheduler.ERROR_0003_ACTION_IS_NULL")); //$NON-NLS-1$ + } + + if (jobParams == null) { + jobParams = new HashMap(); + } + + jobParams.put(RESERVEDMAPKEY_ACTIONID, actionId); + Job ret = createJob(jobName, jobParams, trigger, outputStreamProvider); + ret.setSchedulableClass(""); //$NON-NLS-1$ + return ret; + } + + public static Trigger createQuartzTrigger(IJobTrigger jobTrigger, QuartzJobKey jobId) throws SchedulerException { + Trigger quartzTrigger = null; + if (jobTrigger instanceof ComplexJobTrigger) { + try { + quartzTrigger = + new CronTrigger(jobId.toString(), jobId.getUserName(), jobTrigger.getCronString() != null ? jobTrigger + .getCronString() : QuartzCronStringFactory.createCronString((ComplexJobTrigger) jobTrigger)); + } catch (ParseException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobId.getJobName()), e); //$NON-NLS-1$ + } + } else if (jobTrigger instanceof SimpleJobTrigger) { + SimpleJobTrigger simpleTrigger = (SimpleJobTrigger) jobTrigger; + long interval = simpleTrigger.getRepeatInterval(); + if (interval > 0) { + interval *= 1000; + } + int repeatCount = + simpleTrigger.getRepeatCount() < 0 ? SimpleTrigger.REPEAT_INDEFINITELY : simpleTrigger.getRepeatCount(); + quartzTrigger = + new SimpleTrigger(jobId.toString(), jobId.getUserName(), simpleTrigger.getStartTime(), simpleTrigger + .getEndTime(), repeatCount, interval); + } else { + throw new SchedulerException(Messages.getInstance().getString("QuartzScheduler.ERROR_0002_TRIGGER_WRONG_TYPE")); //$NON-NLS-1$ + } + if (quartzTrigger instanceof SimpleTrigger) { + quartzTrigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_RESCHEDULE_NEXT_WITH_REMAINING_COUNT); + } else { + quartzTrigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW); + } + return quartzTrigger; + } + + private JobDetail createJobDetails(QuartzJobKey jobId, Map jobParams) { + JobDetail jobDetail = new JobDetail(jobId.toString(), jobId.getUserName(), BlockingQuartzJob.class); + jobParams.put(RESERVEDMAPKEY_ACTIONUSER, jobId.getUserName()); + JobDataMap jobDataMap = new JobDataMap(jobParams); + jobDetail.setJobDataMap(jobDataMap); + return jobDetail; + } + + private Calendar createQuartzCalendar(ComplexJobTrigger complexJobTrigger) { + Calendar triggerCalendar = null; + if ((complexJobTrigger.getStartTime() != null) || (complexJobTrigger.getEndTime() != null)) { + triggerCalendar = + new QuartzSchedulerAvailability(complexJobTrigger.getStartTime(), complexJobTrigger.getEndTime()); + } + return triggerCalendar; + } + + /** + * {@inheritDoc} + */ + protected Job createJob(String jobName, Map jobParams, IJobTrigger trigger, + IBackgroundExecutionStreamProvider outputStreamProvider) throws SchedulerException { + + String curUser = getCurrentUser(); + + // determine if the job params tell us who owns the job + Serializable jobOwner = jobParams.get(RESERVEDMAPKEY_ACTIONUSER); + if (jobOwner != null && jobOwner.toString().length() > 0) { + curUser = 
jobOwner.toString(); + } + + QuartzJobKey jobId = new QuartzJobKey(jobName, curUser); + + Trigger quartzTrigger = createQuartzTrigger(trigger, jobId); + + if (trigger.getEndTime() != null) { + quartzTrigger.setEndTime(trigger.getEndTime()); + } + + Calendar triggerCalendar = + quartzTrigger instanceof CronTrigger ? createQuartzCalendar((ComplexJobTrigger) trigger) : null; + + if (outputStreamProvider != null) { + jobParams.put(RESERVEDMAPKEY_STREAMPROVIDER, outputStreamProvider); + } + + if (trigger.getUiPassParam() != null) { + jobParams.put(RESERVEDMAPKEY_UIPASSPARAM, trigger.getUiPassParam()); + } + + if (!jobParams.containsKey(RESERVEDMAPKEY_LINEAGE_ID)) { + String uuid = UUID.randomUUID().toString(); + jobParams.put(RESERVEDMAPKEY_LINEAGE_ID, uuid); + } + + JobDetail jobDetail = createJobDetails(jobId, jobParams); + + try { + Scheduler scheduler = getQuartzScheduler(); + if (triggerCalendar != null) { + scheduler.addCalendar(jobId.toString(), triggerCalendar, false, false); + quartzTrigger.setCalendarName(jobId.toString()); + } + logger + .debug(MessageFormat + .format( + "Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap(jobParams))); //$NON-NLS-1$ + scheduler.scheduleJob(jobDetail, quartzTrigger); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobName), e); //$NON-NLS-1$ + } + Job job = new Job(); - JobDetail jobDetail = scheduler.getJobDetail( jobId, groupName ); - if ( jobDetail != null ) { - JobDataMap jobDataMap = jobDetail.getJobDataMap(); - if ( jobDataMap != null ) { - Map wrappedMap = jobDataMap.getWrappedMap(); - job.setJobParams( wrappedMap ); - } - } - - job.setJobId( jobId ); - setJobTrigger( scheduler, job, trigger ); - job.setUserName( jobDetail.getGroup() ); + job.setJobParams(jobParams); + job.setJobTrigger((JobTrigger) trigger); + job.setNextRun(quartzTrigger.getNextFireTime()); + job.setLastRun(quartzTrigger.getPreviousFireTime()); + job.setJobId(jobId.toString()); + job.setJobName(jobName); + job.setUserName(curUser); + job.setState(JobState.NORMAL); + return job; - } - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId ), e ); //$NON-NLS-1$ - } - return null; - } - - /** {@inheritDoc} */ - @SuppressWarnings( "unchecked" ) - public List getJobs( IJobFilter filter ) throws SchedulerException { - ArrayList jobs = new ArrayList(); - try { - Scheduler scheduler = getQuartzScheduler(); - for ( String groupName : scheduler.getJobGroupNames() ) { - for ( String jobId : scheduler.getJobNames( groupName ) ) { - for ( Trigger trigger : scheduler.getTriggersOfJob( jobId, groupName ) ) { - if ( "MANUAL_TRIGGER".equals( trigger.getGroup() ) ) { - continue; + } + + @Override + public void updateJob(String jobId, Map jobParams, IJobTrigger trigger) + throws SchedulerException { + QuartzJobKey jobKey = QuartzJobKey.parse(jobId); + + Trigger quartzTrigger = createQuartzTrigger(trigger, jobKey); + quartzTrigger.setJobName(jobId); + quartzTrigger.setJobGroup(jobKey.getUserName()); + + Calendar triggerCalendar = + quartzTrigger instanceof CronTrigger ? 
createQuartzCalendar((ComplexJobTrigger) trigger) : null; + + try { + Scheduler scheduler = getQuartzScheduler(); + // int triggerState = scheduler.getTriggerState(jobId, jobKey.getUserName()); + // if (triggerState != Trigger.STATE_PAUSED) { + // scheduler.pauseTrigger(jobId, jobKey.getUserName()); + // } + JobDetail origJobDetail = scheduler.getJobDetail(jobId, jobKey.getUserName()); + if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_ACTIONCLASS)) { + jobParams.put(RESERVEDMAPKEY_ACTIONCLASS, origJobDetail.getJobDataMap().get(RESERVEDMAPKEY_ACTIONCLASS) + .toString()); + } else if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_ACTIONID)) { + jobParams + .put(RESERVEDMAPKEY_ACTIONID, origJobDetail.getJobDataMap().get(RESERVEDMAPKEY_ACTIONID).toString()); + } + + if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_STREAMPROVIDER)) { + jobParams.put(RESERVEDMAPKEY_STREAMPROVIDER, (Serializable) origJobDetail.getJobDataMap().get( + RESERVEDMAPKEY_STREAMPROVIDER)); } - Job job = new Job(); - job.setGroupName( groupName ); - JobDetail jobDetail = scheduler.getJobDetail( jobId, groupName ); - if ( jobDetail != null ) { - job.setUserName( jobDetail.getGroup() ); - JobDataMap jobDataMap = jobDetail.getJobDataMap(); - if ( jobDataMap != null ) { - Map wrappedMap = jobDataMap.getWrappedMap(); - job.setJobParams( wrappedMap ); - } + if (origJobDetail.getJobDataMap().containsKey(RESERVEDMAPKEY_UIPASSPARAM)) { + jobParams.put(RESERVEDMAPKEY_UIPASSPARAM, (Serializable) origJobDetail.getJobDataMap().get( + RESERVEDMAPKEY_UIPASSPARAM)); } - job.setJobId( jobId ); - setJobTrigger( scheduler, job, trigger ); - job.setJobName( QuartzJobKey.parse( jobId ).getJobName() ); - job.setNextRun( trigger.getNextFireTime() ); - job.setLastRun( trigger.getPreviousFireTime() ); - if ( ( filter == null ) || filter.accept( job ) ) { - jobs.add( job ); + JobDetail jobDetail = createJobDetails(jobKey, jobParams); + scheduler.addJob(jobDetail, true); + if (triggerCalendar != null) { + scheduler.addCalendar(jobId.toString(), triggerCalendar, true, true); + quartzTrigger.setCalendarName(jobId.toString()); + } + scheduler.rescheduleJob(jobId, jobKey.getUserName(), quartzTrigger); + // if (triggerState != Trigger.STATE_PAUSED) { + // scheduler.resumeTrigger(jobId, jobKey.getUserName()); + // } + logger + .debug(MessageFormat + .format( + "Scheduling job {0} with trigger {1} and job parameters [ {2} ]", jobId.toString(), trigger, prettyPrintMap(jobParams))); //$NON-NLS-1$ + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0001_FAILED_TO_SCHEDULE_JOB", jobKey.getJobName()), e); //$NON-NLS-1$ + } + } + + /** + * {@inheritDoc} + */ + public Map getAvailabilityWindows() { + // TODO Auto-generated method stub + return null; + } + + /** + * {@inheritDoc} + */ + public List getJobHistory(String jobId) { + // TODO Auto-generated method stub + return null; + } + + /** + * {@inheritDoc} + */ + public void triggerNow(String jobId) throws SchedulerException { + try { + QuartzJobKey jobKey = QuartzJobKey.parse(jobId); + Scheduler scheduler = getQuartzScheduler(); + String groupName = jobKey.getUserName(); + for (Trigger trigger : scheduler.getTriggersOfJob(jobId, groupName)) { + if ("MANUAL_TRIGGER".equals(trigger.getGroup())) { + continue; + } + if (trigger instanceof SimpleTrigger) { + ((SimpleTrigger) trigger).setPreviousFireTime(new Date()); + } else if (trigger instanceof CronTrigger) { + ((CronTrigger) 
trigger).setPreviousFireTime(new Date()); + } + // force the trigger to be updated with the previous fire time + scheduler.rescheduleJob(jobId, jobKey.getUserName(), trigger); + } + + scheduler.triggerJob(jobId, jobKey.getUserName()); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId), e); //$NON-NLS-1$ + } + } + + /** + * {@inheritDoc} + */ + @SuppressWarnings("unchecked") + public Job getJob(String jobId) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + QuartzJobKey jobKey = QuartzJobKey.parse(jobId); + String groupName = jobKey.getUserName(); + for (Trigger trigger : scheduler.getTriggersOfJob(jobId, groupName)) { + Job job = new Job(); + JobDetail jobDetail = scheduler.getJobDetail(jobId, groupName); + if (jobDetail != null) { + JobDataMap jobDataMap = jobDetail.getJobDataMap(); + if (jobDataMap != null) { + Map wrappedMap = jobDataMap.getWrappedMap(); + job.setJobParams(wrappedMap); + } + } + + job.setJobId(jobId); + setJobTrigger(scheduler, job, trigger); + job.setUserName(jobDetail.getGroup()); + return job; } - } - } - } - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( - Messages.getInstance().getString( "QuartzScheduler.ERROR_0004_FAILED_TO_LIST_JOBS" ), e ); //$NON-NLS-1$ - } - return jobs; - } - - private void setJobTrigger( Scheduler scheduler, Job job, Trigger trigger ) throws SchedulerException, - org.quartz.SchedulerException { - QuartzJobKey jobKey = QuartzJobKey.parse( job.getJobId() ); - String groupName = jobKey.getUserName(); - - if ( trigger instanceof SimpleTrigger ) { - SimpleTrigger simpleTrigger = (SimpleTrigger) trigger; - SimpleJobTrigger simpleJobTrigger = new SimpleJobTrigger(); - simpleJobTrigger.setStartTime( simpleTrigger.getStartTime() ); - simpleJobTrigger.setEndTime( simpleTrigger.getEndTime() ); - simpleJobTrigger.setUiPassParam( (String) job.getJobParams().get( RESERVEDMAPKEY_UIPASSPARAM ) ); - long interval = simpleTrigger.getRepeatInterval(); - if ( interval > 0 ) { - interval /= 1000; - } - simpleJobTrigger.setRepeatInterval( interval ); - simpleJobTrigger.setRepeatCount( simpleTrigger.getRepeatCount() ); - job.setJobTrigger( simpleJobTrigger ); - } else if ( trigger instanceof CronTrigger ) { - CronTrigger cronTrigger = (CronTrigger) trigger; - ComplexJobTrigger complexJobTrigger = createComplexTrigger( cronTrigger.getCronExpression() ); - complexJobTrigger.setUiPassParam( (String) job.getJobParams().get( RESERVEDMAPKEY_UIPASSPARAM ) ); - complexJobTrigger.setCronString( ( (CronTrigger) trigger ).getCronExpression() ); - job.setJobTrigger( complexJobTrigger ); - if ( trigger.getCalendarName() != null ) { - Calendar calendar = scheduler.getCalendar( trigger.getCalendarName() ); - if ( calendar instanceof QuartzSchedulerAvailability ) { - QuartzSchedulerAvailability quartzSchedulerAvailability = (QuartzSchedulerAvailability) calendar; - complexJobTrigger.setStartTime( quartzSchedulerAvailability.getStartTime() ); - complexJobTrigger.setEndTime( quartzSchedulerAvailability.getEndTime() ); - } - } - complexJobTrigger.setCronString( ( (CronTrigger) trigger ).getCronExpression() ); - } - - int triggerState = scheduler.getTriggerState( job.getJobId(), groupName ); - switch ( triggerState ) { - case Trigger.STATE_NORMAL: - job.setState( JobState.NORMAL ); - break; - case Trigger.STATE_BLOCKED: - job.setState( JobState.BLOCKED ); - break; - case Trigger.STATE_COMPLETE: - 
job.setState( JobState.COMPLETE ); - break; - case Trigger.STATE_ERROR: - job.setState( JobState.ERROR ); - break; - case Trigger.STATE_PAUSED: - job.setState( JobState.PAUSED ); - break; - default: - job.setState( JobState.UNKNOWN ); - break; - } - - job.setJobName( QuartzJobKey.parse( job.getJobId() ).getJobName() ); - job.setNextRun( trigger.getNextFireTime() ); - job.setLastRun( trigger.getPreviousFireTime() ); - - } - - /** {@inheritDoc} */ - public Integer getMinScheduleInterval( IScheduleSubject subject ) { - // TODO Auto-generated method stub - return 0; - } - - /** {@inheritDoc} */ - public ComplexJobTrigger getSubjectAvailabilityWindow( IScheduleSubject subject ) { - // TODO Auto-generated method stub - return null; - } - - /** {@inheritDoc} */ - public void pause() throws SchedulerException { - try { - getQuartzScheduler().standby(); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( e ); - } - } - - /** {@inheritDoc} */ - public void pauseJob( String jobId ) throws SchedulerException { - try { - Scheduler scheduler = getQuartzScheduler(); - scheduler.pauseJob( jobId, QuartzJobKey.parse( jobId ).getUserName() ); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance() - .getString( "QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS" ), e ); //$NON-NLS-1$ - } - } - - /** {@inheritDoc} */ - public void removeJob( String jobId ) throws SchedulerException { - try { - Scheduler scheduler = getQuartzScheduler(); - scheduler.deleteJob( jobId, QuartzJobKey.parse( jobId ).getUserName() ); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance() - .getString( "QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS" ), e ); //$NON-NLS-1$ - } - } - - /** {@inheritDoc} */ - public void start() throws SchedulerException { - try { - getQuartzScheduler().start(); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( e ); - } - } - - /** {@inheritDoc} */ - public void resumeJob( String jobId ) throws SchedulerException { - try { - Scheduler scheduler = getQuartzScheduler(); - scheduler.resumeJob( jobId, QuartzJobKey.parse( jobId ).getUserName() ); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0005_FAILED_TO_RESUME_JOBS" ), e ); //$NON-NLS-1$ - } - } - - /** {@inheritDoc} */ - public void setAvailabilityWindows( Map availability ) { - // TODO Auto-generated method stub - - } - - /** {@inheritDoc} */ - public void setMinScheduleInterval( IScheduleSubject subject, int intervalInSeconds ) { - // TODO Auto-generated method stub - - } - - /** {@inheritDoc} */ - public void setSubjectAvailabilityWindow( IScheduleSubject subject, ComplexJobTrigger availability ) { - // TODO Auto-generated method stub - - } - - /** - * @return - */ - protected String getCurrentUser() { - IPentahoSession session = PentahoSessionHolder.getSession(); - if ( session == null ) { - return null; - } - Principal p = SecurityHelper.getInstance().getAuthentication(); - return ( p == null ) ? 
null : p.getName(); - } - - public static ComplexJobTrigger createComplexTrigger( String cronExpression ) { - ComplexJobTrigger complexJobTrigger = new ComplexJobTrigger(); - complexJobTrigger.setHourlyRecurrence( (ITimeRecurrence) null ); - complexJobTrigger.setMinuteRecurrence( (ITimeRecurrence) null ); - complexJobTrigger.setSecondRecurrence( (ITimeRecurrence) null ); - - for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 6 ) ) { - complexJobTrigger.addYearlyRecurrence( recurrence ); - } - for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 4 ) ) { - complexJobTrigger.addMonthlyRecurrence( recurrence ); - } - List dayOfWeekRecurrences = parseDayOfWeekRecurrences( cronExpression ); - List dayOfMonthRecurrences = parseRecurrence( cronExpression, 3 ); - if ( ( dayOfWeekRecurrences.size() > 0 ) && ( dayOfMonthRecurrences.size() == 0 ) ) { - for ( ITimeRecurrence recurrence : dayOfWeekRecurrences ) { - complexJobTrigger.addDayOfWeekRecurrence( recurrence ); - } - } else if ( ( dayOfWeekRecurrences.size() == 0 ) && ( dayOfMonthRecurrences.size() > 0 ) ) { - for ( ITimeRecurrence recurrence : dayOfMonthRecurrences ) { - complexJobTrigger.addDayOfMonthRecurrence( recurrence ); - } - } - for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 2 ) ) { - complexJobTrigger.addHourlyRecurrence( recurrence ); - } - for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 1 ) ) { - complexJobTrigger.addMinuteRecurrence( recurrence ); - } - for ( ITimeRecurrence recurrence : parseRecurrence( cronExpression, 0 ) ) { - complexJobTrigger.addSecondRecurrence( recurrence ); - } - return complexJobTrigger; - } - - private static List parseDayOfWeekRecurrences( String cronExpression ) { - List dayOfWeekRecurrence = new ArrayList(); - String delims = "[ ]+"; //$NON-NLS-1$ - String[] tokens = cronExpression.split( delims ); - if ( tokens.length >= 6 ) { - String dayOfWeekTokens = tokens[5]; - tokens = dayOfWeekTokens.split( "," ); //$NON-NLS-1$ - if ( ( tokens.length > 1 ) || !( tokens[0].equals( "*" ) || tokens[0].equals( "?" 
) ) ) { //$NON-NLS-1$ //$NON-NLS-2$ - RecurrenceList dayOfWeekList = null; - for ( String token : tokens ) { - if ( listPattern.matcher( token ).matches() ) { - if ( dayOfWeekList == null ) { - dayOfWeekList = new RecurrenceList(); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0007_FAILED_TO_GET_JOB", jobId), e); //$NON-NLS-1$ + } + return null; + } + + /** + * {@inheritDoc} + */ + @SuppressWarnings("unchecked") + public List getJobs(IJobFilter filter) throws SchedulerException { + ArrayList jobs = new ArrayList(); + try { + Scheduler scheduler = getQuartzScheduler(); + for (String groupName : scheduler.getJobGroupNames()) { + for (String jobId : scheduler.getJobNames(groupName)) { + for (Trigger trigger : scheduler.getTriggersOfJob(jobId, groupName)) { + if ("MANUAL_TRIGGER".equals(trigger.getGroup())) { + continue; + } + Job job = new Job(); + job.setGroupName(groupName); + JobDetail jobDetail = scheduler.getJobDetail(jobId, groupName); + if (jobDetail != null) { + job.setUserName(jobDetail.getGroup()); + JobDataMap jobDataMap = jobDetail.getJobDataMap(); + if (jobDataMap != null) { + Map wrappedMap = jobDataMap.getWrappedMap(); + job.setJobParams(wrappedMap); + } + } + + job.setJobId(jobId); + setJobTrigger(scheduler, job, trigger); + job.setJobName(QuartzJobKey.parse(jobId).getJobName()); + job.setNextRun(trigger.getNextFireTime()); + job.setLastRun(trigger.getPreviousFireTime()); + if ((filter == null) || filter.accept(job)) { + jobs.add(job); + } + } + } } - dayOfWeekList.getValues().add( Integer.parseInt( token ) ); - } else { - if ( dayOfWeekList != null ) { - dayOfWeekRecurrence.add( dayOfWeekList ); - dayOfWeekList = null; + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException( + Messages.getInstance().getString("QuartzScheduler.ERROR_0004_FAILED_TO_LIST_JOBS"), e); //$NON-NLS-1$ + } + return jobs; + } + + private void setJobTrigger(Scheduler scheduler, Job job, Trigger trigger) throws SchedulerException, + org.quartz.SchedulerException { + QuartzJobKey jobKey = QuartzJobKey.parse(job.getJobId()); + String groupName = jobKey.getUserName(); + + if (trigger instanceof SimpleTrigger) { + SimpleTrigger simpleTrigger = (SimpleTrigger) trigger; + SimpleJobTrigger simpleJobTrigger = new SimpleJobTrigger(); + simpleJobTrigger.setStartTime(simpleTrigger.getStartTime()); + simpleJobTrigger.setEndTime(simpleTrigger.getEndTime()); + simpleJobTrigger.setUiPassParam((String) job.getJobParams().get(RESERVEDMAPKEY_UIPASSPARAM)); + long interval = simpleTrigger.getRepeatInterval(); + if (interval > 0) { + interval /= 1000; } - if ( sequencePattern.matcher( token ).matches() ) { - String[] days = token.split( "-" ); //$NON-NLS-1$ - dayOfWeekRecurrence.add( new SequentialRecurrence( Integer.parseInt( days[0] ), Integer - .parseInt( days[1] ) ) ); - } else if ( intervalPattern.matcher( token ).matches() ) { - String[] days = token.split( "/" ); //$NON-NLS-1$ - dayOfWeekRecurrence.add( new IncrementalRecurrence( Integer.parseInt( days[0] ), Integer - .parseInt( days[1] ) ) ); - } else if ( qualifiedDayPattern.matcher( token ).matches() ) { - String[] days = token.split( "#" ); //$NON-NLS-1$ - dayOfWeekRecurrence - .add( new QualifiedDayOfWeek( Integer.parseInt( days[1] ), Integer.parseInt( days[0] ) ) ); - } else if ( lastDayPattern.matcher( token ).matches() ) { - DayOfWeek dayOfWeek = - DayOfWeek.values()[( Integer.parseInt( token.substring( 0, token.length() - 1 ) ) - 1 ) % 7]; - 
dayOfWeekRecurrence.add( new QualifiedDayOfWeek( DayOfWeekQualifier.LAST, dayOfWeek ) ); - } else if ( dayOfWeekRangePattern.matcher( token ).matches() ) { - String[] days = token.split( "-" ); //$NON-NLS-1$ - int start = DayOfWeek.valueOf( days[0] ).ordinal(); - int finish = DayOfWeek.valueOf( days[1] ).ordinal(); - dayOfWeekRecurrence.add( new SequentialRecurrence( start, finish ) ); - } else { - dayOfWeekList = new RecurrenceList(); - dayOfWeekList.getValues().add( DayOfWeek.valueOf( token ).ordinal() ); - dayOfWeekRecurrence.add( dayOfWeekList ); - dayOfWeekList = null; - // } else { - // throw new IllegalArgumentException(Messages.getInstance().getErrorString( - // "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$ + simpleJobTrigger.setRepeatInterval(interval); + simpleJobTrigger.setRepeatCount(simpleTrigger.getRepeatCount()); + job.setJobTrigger(simpleJobTrigger); + } else if (trigger instanceof CronTrigger) { + CronTrigger cronTrigger = (CronTrigger) trigger; + ComplexJobTrigger complexJobTrigger = createComplexTrigger(cronTrigger.getCronExpression()); + complexJobTrigger.setUiPassParam((String) job.getJobParams().get(RESERVEDMAPKEY_UIPASSPARAM)); + complexJobTrigger.setCronString(((CronTrigger) trigger).getCronExpression()); + job.setJobTrigger(complexJobTrigger); + if (trigger.getCalendarName() != null) { + Calendar calendar = scheduler.getCalendar(trigger.getCalendarName()); + if (calendar instanceof QuartzSchedulerAvailability) { + QuartzSchedulerAvailability quartzSchedulerAvailability = (QuartzSchedulerAvailability) calendar; + complexJobTrigger.setStartTime(quartzSchedulerAvailability.getStartTime()); + complexJobTrigger.setEndTime(quartzSchedulerAvailability.getEndTime()); + } } - } - - } - if ( dayOfWeekList != null ) { - dayOfWeekRecurrence.add( dayOfWeekList ); - } - } - } else { - throw new IllegalArgumentException( Messages.getInstance().getErrorString( - "ComplexJobTrigger.ERROR_0001_InvalidCronExpression" ) ); //$NON-NLS-1$ - } - return dayOfWeekRecurrence; - } - - private static List parseRecurrence( String cronExpression, int tokenIndex ) { - List timeRecurrence = new ArrayList(); - String delims = "[ ]+"; //$NON-NLS-1$ - String[] tokens = cronExpression.split( delims ); - if ( tokens.length > tokenIndex ) { - String timeTokens = tokens[tokenIndex]; - tokens = timeTokens.split( "," ); //$NON-NLS-1$ - if ( ( tokens.length > 1 ) || !( tokens[0].equals( "*" ) || tokens[0].equals( "?" 
) ) ) { //$NON-NLS-1$ //$NON-NLS-2$ - RecurrenceList timeList = null; - for ( String token : tokens ) { - if ( listPattern.matcher( token ).matches() ) { - if ( timeList == null ) { - timeList = new RecurrenceList(); + complexJobTrigger.setCronString(((CronTrigger) trigger).getCronExpression()); + } + + int triggerState = scheduler.getTriggerState(job.getJobId(), groupName); + switch (triggerState) { + case Trigger.STATE_NORMAL: + job.setState(JobState.NORMAL); + break; + case Trigger.STATE_BLOCKED: + job.setState(JobState.BLOCKED); + break; + case Trigger.STATE_COMPLETE: + job.setState(JobState.COMPLETE); + break; + case Trigger.STATE_ERROR: + job.setState(JobState.ERROR); + break; + case Trigger.STATE_PAUSED: + job.setState(JobState.PAUSED); + break; + default: + job.setState(JobState.UNKNOWN); + break; + } + + job.setJobName(QuartzJobKey.parse(job.getJobId()).getJobName()); + job.setNextRun(trigger.getNextFireTime()); + job.setLastRun(trigger.getPreviousFireTime()); + + } + + /** + * {@inheritDoc} + */ + public Integer getMinScheduleInterval(IScheduleSubject subject) { + // TODO Auto-generated method stub + return 0; + } + + /** + * {@inheritDoc} + */ + public ComplexJobTrigger getSubjectAvailabilityWindow(IScheduleSubject subject) { + // TODO Auto-generated method stub + return null; + } + + /** + * {@inheritDoc} + */ + public void pause() throws SchedulerException { + try { + getQuartzScheduler().standby(); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(e); + } + } + + /** + * {@inheritDoc} + */ + public void pauseJob(String jobId) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + scheduler.pauseJob(jobId, QuartzJobKey.parse(jobId).getUserName()); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance() + .getString("QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS"), e); //$NON-NLS-1$ + } + } + + /** + * {@inheritDoc} + */ + public void removeJob(String jobId) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + scheduler.deleteJob(jobId, QuartzJobKey.parse(jobId).getUserName()); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance() + .getString("QuartzScheduler.ERROR_0005_FAILED_TO_PAUSE_JOBS"), e); //$NON-NLS-1$ + } + } + + /** + * {@inheritDoc} + */ + public void start() throws SchedulerException { + try { + getQuartzScheduler().start(); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(e); + } + } + + /** + * {@inheritDoc} + */ + public void resumeJob(String jobId) throws SchedulerException { + try { + Scheduler scheduler = getQuartzScheduler(); + scheduler.resumeJob(jobId, QuartzJobKey.parse(jobId).getUserName()); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0005_FAILED_TO_RESUME_JOBS"), e); //$NON-NLS-1$ + } + } + + /** + * {@inheritDoc} + */ + public void setAvailabilityWindows(Map availability) { + // TODO Auto-generated method stub + + } + + /** + * {@inheritDoc} + */ + public void setMinScheduleInterval(IScheduleSubject subject, int intervalInSeconds) { + // TODO Auto-generated method stub + + } + + /** + * {@inheritDoc} + */ + public void setSubjectAvailabilityWindow(IScheduleSubject subject, ComplexJobTrigger availability) { + // TODO Auto-generated method stub + + } + + /** + * @return + */ + protected String getCurrentUser() { + IPentahoSession session = 
PentahoSessionHolder.getSession(); + if (session == null) { + return null; + } + Principal p = SecurityHelper.getInstance().getAuthentication(); + return (p == null) ? null : p.getName(); + } + + public static ComplexJobTrigger createComplexTrigger(String cronExpression) { + ComplexJobTrigger complexJobTrigger = new ComplexJobTrigger(); + complexJobTrigger.setHourlyRecurrence((ITimeRecurrence) null); + complexJobTrigger.setMinuteRecurrence((ITimeRecurrence) null); + complexJobTrigger.setSecondRecurrence((ITimeRecurrence) null); + + for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 6)) { + complexJobTrigger.addYearlyRecurrence(recurrence); + } + for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 4)) { + complexJobTrigger.addMonthlyRecurrence(recurrence); + } + List dayOfWeekRecurrences = parseDayOfWeekRecurrences(cronExpression); + List dayOfMonthRecurrences = parseRecurrence(cronExpression, 3); + if ((dayOfWeekRecurrences.size() > 0) && (dayOfMonthRecurrences.size() == 0)) { + for (ITimeRecurrence recurrence : dayOfWeekRecurrences) { + complexJobTrigger.addDayOfWeekRecurrence(recurrence); + } + } else if ((dayOfWeekRecurrences.size() == 0) && (dayOfMonthRecurrences.size() > 0)) { + for (ITimeRecurrence recurrence : dayOfMonthRecurrences) { + complexJobTrigger.addDayOfMonthRecurrence(recurrence); + } + } + for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 2)) { + complexJobTrigger.addHourlyRecurrence(recurrence); + } + for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 1)) { + complexJobTrigger.addMinuteRecurrence(recurrence); + } + for (ITimeRecurrence recurrence : parseRecurrence(cronExpression, 0)) { + complexJobTrigger.addSecondRecurrence(recurrence); + } + return complexJobTrigger; + } + + private static List parseDayOfWeekRecurrences(String cronExpression) { + List dayOfWeekRecurrence = new ArrayList(); + String delims = "[ ]+"; //$NON-NLS-1$ + String[] tokens = cronExpression.split(delims); + if (tokens.length >= 6) { + String dayOfWeekTokens = tokens[5]; + tokens = dayOfWeekTokens.split(","); //$NON-NLS-1$ + if ((tokens.length > 1) || !(tokens[0].equals("*") || tokens[0].equals("?"))) { //$NON-NLS-1$ //$NON-NLS-2$ + RecurrenceList dayOfWeekList = null; + for (String token : tokens) { + if (listPattern.matcher(token).matches()) { + if (dayOfWeekList == null) { + dayOfWeekList = new RecurrenceList(); + } + dayOfWeekList.getValues().add(Integer.parseInt(token)); + } else { + if (dayOfWeekList != null) { + dayOfWeekRecurrence.add(dayOfWeekList); + dayOfWeekList = null; + } + if (sequencePattern.matcher(token).matches()) { + String[] days = token.split("-"); //$NON-NLS-1$ + dayOfWeekRecurrence.add(new SequentialRecurrence(Integer.parseInt(days[0]), Integer + .parseInt(days[1]))); + } else if (intervalPattern.matcher(token).matches()) { + String[] days = token.split("/"); //$NON-NLS-1$ + dayOfWeekRecurrence.add(new IncrementalRecurrence(Integer.parseInt(days[0]), Integer + .parseInt(days[1]))); + } else if (qualifiedDayPattern.matcher(token).matches()) { + String[] days = token.split("#"); //$NON-NLS-1$ + dayOfWeekRecurrence + .add(new QualifiedDayOfWeek(Integer.parseInt(days[1]), Integer.parseInt(days[0]))); + } else if (lastDayPattern.matcher(token).matches()) { + DayOfWeek dayOfWeek = + DayOfWeek.values()[(Integer.parseInt(token.substring(0, token.length() - 1)) - 1) % 7]; + dayOfWeekRecurrence.add(new QualifiedDayOfWeek(DayOfWeekQualifier.LAST, dayOfWeek)); + } else if (dayOfWeekRangePattern.matcher(token).matches()) { + 
String[] days = token.split("-"); //$NON-NLS-1$ + int start = DayOfWeek.valueOf(days[0]).ordinal(); + int finish = DayOfWeek.valueOf(days[1]).ordinal(); + dayOfWeekRecurrence.add(new SequentialRecurrence(start, finish)); + } else { + dayOfWeekList = new RecurrenceList(); + dayOfWeekList.getValues().add(DayOfWeek.valueOf(token).ordinal()); + dayOfWeekRecurrence.add(dayOfWeekList); + dayOfWeekList = null; + // } else { + // throw new IllegalArgumentException(Messages.getInstance().getErrorString( + // "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$ + } + } + + } + if (dayOfWeekList != null) { + dayOfWeekRecurrence.add(dayOfWeekList); + } } - timeList.getValues().add( Integer.parseInt( token ) ); - } else { - if ( timeList != null ) { - timeRecurrence.add( timeList ); - timeList = null; + } else { + throw new IllegalArgumentException(Messages.getInstance().getErrorString( + "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$ + } + return dayOfWeekRecurrence; + } + + private static List parseRecurrence(String cronExpression, int tokenIndex) { + List timeRecurrence = new ArrayList(); + String delims = "[ ]+"; //$NON-NLS-1$ + String[] tokens = cronExpression.split(delims); + if (tokens.length > tokenIndex) { + String timeTokens = tokens[tokenIndex]; + tokens = timeTokens.split(","); //$NON-NLS-1$ + if ((tokens.length > 1) || !(tokens[0].equals("*") || tokens[0].equals("?"))) { //$NON-NLS-1$ //$NON-NLS-2$ + RecurrenceList timeList = null; + for (String token : tokens) { + if (listPattern.matcher(token).matches()) { + if (timeList == null) { + timeList = new RecurrenceList(); + } + timeList.getValues().add(Integer.parseInt(token)); + } else { + if (timeList != null) { + timeRecurrence.add(timeList); + timeList = null; + } + if (sequencePattern.matcher(token).matches()) { + String[] days = token.split("-"); //$NON-NLS-1$ + timeRecurrence.add(new SequentialRecurrence(Integer.parseInt(days[0]), + Integer.parseInt(days[1]))); + } else if (intervalPattern.matcher(token).matches()) { + String[] days = token.split("/"); //$NON-NLS-1$ + timeRecurrence + .add(new IncrementalRecurrence(Integer.parseInt(days[0]), Integer.parseInt(days[1]))); + } else if ("L".equalsIgnoreCase(token)) { + timeRecurrence.add(new QualifiedDayOfMonth()); + } else { + throw new IllegalArgumentException(Messages.getInstance().getErrorString( + "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$ + } + } + + } + if (timeList != null) { + timeRecurrence.add(timeList); + } } - if ( sequencePattern.matcher( token ).matches() ) { - String[] days = token.split( "-" ); //$NON-NLS-1$ - timeRecurrence.add( new SequentialRecurrence( Integer.parseInt( days[0] ), - Integer.parseInt( days[ 1 ] ) ) ); - } else if ( intervalPattern.matcher( token ).matches() ) { - String[] days = token.split( "/" ); //$NON-NLS-1$ - timeRecurrence - .add( new IncrementalRecurrence( Integer.parseInt( days[ 0 ] ), Integer.parseInt( days[ 1 ] ) ) ); - } else if ( "L".equalsIgnoreCase( token ) ) { - timeRecurrence.add( new QualifiedDayOfMonth() ); - } else { - throw new IllegalArgumentException( Messages.getInstance().getErrorString( - "ComplexJobTrigger.ERROR_0001_InvalidCronExpression" ) ); //$NON-NLS-1$ + } else { + throw new IllegalArgumentException(Messages.getInstance().getErrorString( + "ComplexJobTrigger.ERROR_0001_InvalidCronExpression")); //$NON-NLS-1$ + } + return timeRecurrence; + } + + /** + * {@inheritDoc} + */ + public SchedulerStatus getStatus() throws SchedulerException { + SchedulerStatus 
schedulerStatus = SchedulerStatus.STOPPED; + try { + if (getQuartzScheduler().isInStandbyMode()) { + schedulerStatus = SchedulerStatus.PAUSED; + } else if (getQuartzScheduler().isStarted()) { + schedulerStatus = SchedulerStatus.RUNNING; } - } - - } - if ( timeList != null ) { - timeRecurrence.add( timeList ); - } - } - } else { - throw new IllegalArgumentException( Messages.getInstance().getErrorString( - "ComplexJobTrigger.ERROR_0001_InvalidCronExpression" ) ); //$NON-NLS-1$ - } - return timeRecurrence; - } - - /** {@inheritDoc} */ - public SchedulerStatus getStatus() throws SchedulerException { - SchedulerStatus schedulerStatus = SchedulerStatus.STOPPED; - try { - if ( getQuartzScheduler().isInStandbyMode() ) { - schedulerStatus = SchedulerStatus.PAUSED; - } else if ( getQuartzScheduler().isStarted() ) { - schedulerStatus = SchedulerStatus.RUNNING; - } - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( Messages.getInstance().getString( - "QuartzScheduler.ERROR_0006_FAILED_TO_GET_SCHEDULER_STATUS" ), e ); //$NON-NLS-1$ - } - return schedulerStatus; - } - - /** {@inheritDoc} */ - public void shutdown() throws SchedulerException { - try { - boolean waitForJobsToComplete = true; - getQuartzScheduler().shutdown( waitForJobsToComplete ); - setQuartzScheduler(null); - } catch ( org.quartz.SchedulerException e ) { - throw new SchedulerException( e ); - } - } - - public static String prettyPrintMap( Map map ) { - StringBuilder b = new StringBuilder(); - for ( Map.Entry entry : map.entrySet() ) { - b.append( entry.getKey() ); - b.append( "=" ); //$NON-NLS-1$ - b.append( entry.getValue() ); - b.append( "; " ); //$NON-NLS-1$ - } - return b.toString(); - } - - public void addListener( ISchedulerListener listener ) { - listeners.add( listener ); - } - - public void setListeners( Collection listeners ) { - this.listeners.addAll( listeners ); - } - - public void fireJobCompleted( IAction actionBean, String actionUser, Map params, - IBackgroundExecutionStreamProvider streamProvider ) { - for ( ISchedulerListener listener : listeners ) { - listener.jobCompleted( actionBean, actionUser, params, streamProvider ); - } - } + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(Messages.getInstance().getString( + "QuartzScheduler.ERROR_0006_FAILED_TO_GET_SCHEDULER_STATUS"), e); //$NON-NLS-1$ + } + return schedulerStatus; + } + + /** + * {@inheritDoc} + */ + public void shutdown() throws SchedulerException { + try { + boolean waitForJobsToComplete = true; + getQuartzScheduler().shutdown(waitForJobsToComplete); + setQuartzScheduler(null); + } catch (org.quartz.SchedulerException e) { + throw new SchedulerException(e); + } + } + + public static String prettyPrintMap(Map map) { + StringBuilder b = new StringBuilder(); + for (Map.Entry entry : map.entrySet()) { + b.append(entry.getKey()); + b.append("="); //$NON-NLS-1$ + b.append(entry.getValue()); + b.append("; "); //$NON-NLS-1$ + } + return b.toString(); + } + + public void addListener(ISchedulerListener listener) { + listeners.add(listener); + } + + public void setListeners(Collection listeners) { + this.listeners.addAll(listeners); + } + + public void fireJobCompleted(IAction actionBean, String actionUser, Map params, + IBackgroundExecutionStreamProvider streamProvider) { + for (ISchedulerListener listener : listeners) { + listener.jobCompleted(actionBean, actionUser, params, streamProvider); + } + } } diff --git a/pom.xml b/pom.xml index 392d313..2634a8b 100644 --- a/pom.xml +++ b/pom.xml @@ -1,4 +1,5 @@ - + 
4.0.0 com.github.zhicwu From e91ad91eb27824cebdd35fe16a56484708835c27 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:10:53 +0800 Subject: [PATCH 03/15] Merge data sources from master to slave --- .../core/database/util/DataSourceLocator.java | 142 ++++++++++++++++++ .../di/core/database/util/DatabaseUtil.java | 5 + 2 files changed, 147 insertions(+) create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DataSourceLocator.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DataSourceLocator.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DataSourceLocator.java new file mode 100644 index 0000000..f881185 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DataSourceLocator.java @@ -0,0 +1,142 @@ +/*! ****************************************************************************** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ +package org.pentaho.di.core.database.util; + +import org.osjava.sj.loader.SJDataSource; +import org.pentaho.database.IDatabaseDialect; +import org.pentaho.database.dialect.GenericDatabaseDialect; +import org.pentaho.database.model.DatabaseAccessType; +import org.pentaho.database.model.IDatabaseConnection; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.www.CarteSingleton; + +import javax.naming.Context; +import javax.sql.DataSource; +import java.util.*; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Utility class for data source lookup. + * + * @author Zhichun Wu + */ +public final class DataSourceLocator { + private static boolean activated = false; + + private static final AtomicBoolean changed = new AtomicBoolean(false); + private static final Map dataSources + = Collections.synchronizedMap(new HashMap()); + + private static final ServiceLoader dialectLoader = ServiceLoader.load(IDatabaseDialect.class); + + private static DataSource buildDataSource(IDatabaseConnection conn) throws Exception { + String dbType = conn.getDatabaseType().getShortName(); + + SJDataSource ds = null; + + for (IDatabaseDialect dialect : dialectLoader) { + // FIXME fallback to name/desc like Kettle did? + if (!dbType.equals(dialect.getDatabaseType().getShortName())) { + continue; + } + + ds = new SJDataSource(dialect instanceof GenericDatabaseDialect + ? conn.getAttributes().get(GenericDatabaseDialect.ATTRIBUTE_CUSTOM_DRIVER_CLASS) + : dialect.getNativeDriver(), + dialect.supportsOptionsInURL() ? dialect.getURLWithExtraOptions(conn) : dialect.getURL(conn), + conn.getUsername(), + conn.getPassword(), + new Properties()); + break; + } + + return ds; + } + + /** + * This method tries to import data sources defined in Kettle master time after time. 
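Note: buildDataSource(...) above picks the dialect whose short name matches the connection's database type by iterating a java.util.ServiceLoader of IDatabaseDialect. The following is a minimal, self-contained sketch of that lookup pattern; the Dialect interface and its methods are illustrative placeholders, not the Pentaho API.

import java.util.Optional;
import java.util.ServiceLoader;

// Placeholder provider interface standing in for IDatabaseDialect.
interface Dialect {
    String shortName();                                // e.g. "MYSQL"
    String url(String host, int port, String db);
}

public final class DialectLookup {
    // Discovers implementations listed under META-INF/services on the class path,
    // just as DataSourceLocator's dialectLoader does for IDatabaseDialect.
    private static final ServiceLoader<Dialect> LOADER = ServiceLoader.load(Dialect.class);

    static Optional<Dialect> byShortName(String shortName) {
        for (Dialect dialect : LOADER) {
            if (dialect.shortName().equals(shortName)) {
                return Optional.of(dialect);
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        // Prints false unless a Dialect provider is registered on the class path.
        System.out.println(byShortName("MYSQL").isPresent());
    }
}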
+ * It does nothing in non-cluster environment, but it addresses data source configuration issue in Kettle cluster: + * - zero data source configuration in Kettle slave servers - everything comes from master + * - cache data source configuration in memory for a while for better performance + * - update cached data source in case Kettle master changed configuration of a certain data source + * + * @param dsCache shared in-memory cache of data sources regardless where it comes from + * @param ctx naming context for binding / rebinding data source from master + * @param dsName name of the data source + */ + static void importDataSourcesFromMaster(Map dsCache, Context ctx, String dsName) { + if (!activated) { + return; + } + + // this is not truly thread-safe, as data sources might be updated in the same time + // however, it is worthy of doing this for better performance + if (changed.compareAndSet(true, false)) { + dsCache.putAll(dataSources); + + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + log.logBasic("Data source cache refreshed successfully"); + } + } + + public static void activate() { + activated = true; + } + + public static void deactivate() { + activated = false; + } + + + public static int updateDataSourceMappings(Map mapping) { + if (mapping == null || mapping.size() == 0) { + return 0; + } + + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + + int counter = 0; + for (Map.Entry entry : mapping.entrySet()) { + String dsName = entry.getKey(); + IDatabaseConnection ds = entry.getValue(); + + if (ds.getAccessType() == DatabaseAccessType.NATIVE && ds.getDatabaseType() != null) { + try { + DataSource d = buildDataSource(ds); + if (d == null) { + log.logError("Bypass unsupported data source: " + dsName); + continue; + } + + // it's better to use a separated map here but we should be just fine + dataSources.put(dsName, d); + counter++; + } catch (Exception e) { + log.logError("Failed to build data source: " + dsName, e); + } + } + } + + if (counter > 0) { + changed.set(true); + } + + return counter; + } + + private DataSourceLocator() { + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java index 29626ee..cb74f84 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/util/DatabaseUtil.java @@ -67,6 +67,11 @@ protected static DataSource getDataSourceFromJndi(String dsName, Context ctx) th if (Const.isEmpty(dsName)) { throw new NamingException(BaseMessages.getString(PKG, "DatabaseUtil.DSNotFound", String.valueOf(dsName))); } + + // Too bad there's no context for us to figure out which master initiated the data source lookup + // FIXME FoundDS is polluted here... 
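Note: importDataSourcesFromMaster(...) and updateDataSourceMappings(...) above cooperate through the 'changed' flag: the writer stages new data sources and raises the flag, and the first reader to win compareAndSet copies the staged entries into the shared cache. A stripped-down sketch of that hand-off, with illustrative names rather than the Kettle API, and with the same "not strictly atomic, but cheap" trade-off the patch comments on:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

// Writer threads call publish(); reader threads call refreshInto(theirCache).
public final class StagedCache<K, V> {
    private final Map<K, V> staging = new ConcurrentHashMap<>();
    private final AtomicBoolean changed = new AtomicBoolean(false);

    public void publish(Map<K, V> updates) {
        staging.putAll(updates);
        changed.set(true);
    }

    // Copies staged entries into the caller's cache at most once per publish;
    // concurrent readers race on compareAndSet, so only one of them pays the cost.
    public void refreshInto(Map<K, V> target) {
        if (changed.compareAndSet(true, false)) {
            target.putAll(staging);
        }
    }
}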
+ DataSourceLocator.importDataSourcesFromMaster(FoundDS, ctx, dsName); + Object foundDs = FoundDS.get(dsName); if (foundDs != null) { return (DataSource) foundDs; From 15a70a701a97a4da9e737256ae88c7b701d9b1de Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:11:42 +0800 Subject: [PATCH 04/15] Slightly improve logging mechanism --- .../di/core/logging/LogChannelFileWriter.java | 10 +++++++ .../di/core/logging/LoggingRegistry.java | 28 +++++++++++++------ 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java index d227ab5..4779ca9 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LogChannelFileWriter.java @@ -65,6 +65,16 @@ public LogChannelFileWriter(String logChannelId, FileObject logFile, boolean app active = new AtomicBoolean(false); lastBufferLineNr = KettleLogStore.getLastBufferLineNr(); + // it's basic move to create the directory *before* creating log file + try { + FileObject parent = logFile == null ? null : logFile.getParent(); + if (parent != null && !parent.exists()) { + parent.createFolder(); + } + } catch (Exception e) { + // ignore this type of exception as eventually KettleVFS will handle this + } + try { logFileOutputStream = KettleVFS.getOutputStream(logFile, appending); } catch (IOException e) { diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java index 0cc99f1..891c7bf 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/logging/LoggingRegistry.java @@ -29,14 +29,16 @@ import java.util.concurrent.ConcurrentHashMap; public class LoggingRegistry { - private static LoggingRegistry registry = new LoggingRegistry(); - private Map map; - private Map> childrenMap; - private Date lastModificationTime; - private int maxSize; - private final int DEFAULT_MAX_SIZE = 10000; + // OK, why not declare the followings as final? 
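Note: the LogChannelFileWriter change above creates the parent directory before asking VFS for the log file's output stream. With plain commons-vfs2 the same guard can be sketched as follows; the path in main() is only an example.

import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemManager;
import org.apache.commons.vfs2.VFS;
import java.io.OutputStream;

public final class SafeLogFile {
    public static OutputStream open(String uri, boolean append) throws Exception {
        FileSystemManager fsm = VFS.getManager();
        FileObject logFile = fsm.resolveFile(uri);

        // Make sure the enclosing folder exists before opening the stream.
        FileObject parent = logFile.getParent();
        if (parent != null && !parent.exists()) {
            parent.createFolder();   // also creates any missing ancestor folders
        }
        return logFile.getContent().getOutputStream(append);
    }

    public static void main(String[] args) throws Exception {
        open("file:///tmp/carte-logs/job.log", true).close();
    }
}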
+ private static final LoggingRegistry registry = new LoggingRegistry(); + private static final int DEFAULT_MAX_SIZE = 10000; + + private final Map map; + private final Map> childrenMap; + private final int maxSize; + private final Object syncObject = new Object(); - private Object syncObject = new Object(); + private Date lastModificationTime; private LoggingRegistry() { this.map = new ConcurrentHashMap(); @@ -52,8 +54,16 @@ public static LoggingRegistry getInstance() { public String registerLoggingSource(Object object) { synchronized (this.syncObject) { - - this.maxSize = Const.toInt(EnvUtil.getSystemProperty("KETTLE_MAX_LOGGING_REGISTRY_SIZE"), 10000); + // it does not make sense to me to check the setting every time registering a logging service, + // not to mention I once had the weird thread dump below(all threads were locked up): + // Thread 28307: (state = BLOCKED) + // - java.lang.Integer.parseInt(java.lang.String, int) @bci=4, line=542 (Compiled frame) + // - java.lang.Integer.parseInt(java.lang.String) @bci=3, line=615 (Compiled frame) + // - org.pentaho.di.core.Const.toInt(java.lang.String, int) @bci=1, line=1173 (Compiled frame) + // - org.pentaho.di.core.logging.LoggingRegistry.registerLoggingSource(java.lang.Object) @bci=16, line=63 (Compiled frame) + // ... + + // this.maxSize = Const.toInt( EnvUtil.getSystemProperty( "KETTLE_MAX_LOGGING_REGISTRY_SIZE" ), 10000 ); LoggingObject loggingSource = new LoggingObject(object); From c15ab2aed53724a2df25b8bb77c4484f2a8b6f8f Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:19:54 +0800 Subject: [PATCH 05/15] Add exclusive execution rule to QuartzScheduler --- .../quartz/ActionAdapterQuartzJob.java | 18 +- .../quartz/ExclusiveKettleJobRule.java | 209 ++++++++++++++++++ .../scheduler2/quartz/QuartzScheduler.java | 5 + .../quartz/QuartzSchedulerHelper.java | 63 ++++++ 4 files changed, 291 insertions(+), 4 deletions(-) create mode 100644 pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ExclusiveKettleJobRule.java create mode 100644 pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzSchedulerHelper.java diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java index b6bc6cc..5e33347 100644 --- a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ActionAdapterQuartzJob.java @@ -44,9 +44,7 @@ import org.pentaho.platform.util.messages.LocaleHelper; import org.pentaho.platform.util.web.MimeHelper; import org.quartz.Job; -import org.quartz.JobDataMap; -import org.quartz.JobExecutionContext; -import org.quartz.JobExecutionException; +import org.quartz.*; import java.io.OutputStream; import java.io.Serializable; @@ -69,7 +67,8 @@ public class ActionAdapterQuartzJob implements Job { private static final long RETRY_SLEEP_AMOUNT = 10000; private String outputFilePath = null; - private Object lock = new Object(); + // Without "final" here it's kind of scary... 
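Note: the LoggingRegistry change above stops re-reading KETTLE_MAX_LOGGING_REGISTRY_SIZE on every registerLoggingSource() call. Resolving such a setting once into a final field is enough; a minimal sketch is shown below. It reads only the plain JVM system property, whereas Kettle's EnvUtil also consults kettle.properties, so the surrounding class is illustrative.

public final class LoggingLimits {
    // Resolved once at class-initialization time; Integer.getInteger falls back
    // to the default when the property is missing or not a valid number.
    static final int MAX_REGISTRY_SIZE =
            Integer.getInteger("KETTLE_MAX_LOGGING_REGISTRY_SIZE", 10000);

    private LoggingLimits() {
    }
}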
+ private final Object lock = new Object(); protected Class resolveClass(JobDataMap jobDataMap) throws PluginBeanException, JobExecutionException { String actionClass = jobDataMap.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONCLASS); @@ -112,6 +111,17 @@ protected Class resolveClass(JobDataMap jobDataMap) throws PluginBeanExceptio @SuppressWarnings("unchecked") public void execute(JobExecutionContext context) throws JobExecutionException { + Scheduler scheduler = null; + try { + IScheduler pentahoScheduler = PentahoSystem.getObjectFactory().get(IScheduler.class, "IScheduler2", null); + scheduler = pentahoScheduler instanceof QuartzScheduler + ? ((QuartzScheduler) pentahoScheduler).getQuartzScheduler() : null; + } catch (Exception e) { + // ignore + } + + QuartzSchedulerHelper.applyJobExecutionRules(scheduler, context == null ? null : context.getJobDetail()); + JobDataMap jobDataMap = context.getMergedJobDataMap(); String actionUser = jobDataMap.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONUSER); diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ExclusiveKettleJobRule.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ExclusiveKettleJobRule.java new file mode 100644 index 0000000..662395d --- /dev/null +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/ExclusiveKettleJobRule.java @@ -0,0 +1,209 @@ +/*! ****************************************************************************** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ +package org.pentaho.platform.scheduler2.quartz; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.quartz.*; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.pentaho.platform.scheduler2.quartz.QuartzSchedulerHelper.*; + +/** + * Define exclusive execution rule, which will be used in scheduling service. 
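Note: the rule implemented below discards a newly fired job when an identical Kettle job or transformation is still running, by scanning the scheduler's currently executing jobs. The same "at most one run per logical key" idea, reduced to plain Java and independent of Quartz (how the key is derived is up to the caller), can be sketched as:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public final class ExclusiveRunner {
    // Keys of jobs that are currently running.
    private static final Set<String> RUNNING = ConcurrentHashMap.newKeySet();

    /** Runs the task unless another task with the same key is already active. */
    public static void runExclusively(String key, Runnable task) {
        if (!RUNNING.add(key)) {
            throw new IllegalStateException("Discarding " + key + ": already running");
        }
        try {
            task.run();
        } finally {
            RUNNING.remove(key);
        }
    }

    public static void main(String[] args) {
        runExclusively("daily-report.admin", () -> System.out.println("first run ok"));
    }
}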
+ * + * @author Zhichun Wu + */ +public class ExclusiveKettleJobRule { // implements TriggerListener { + static final ExclusiveKettleJobRule instance = new ExclusiveKettleJobRule(); + + private static final Log logger = LogFactory.getLog(ExclusiveKettleJobRule.class); + + /* It won't help even you implemented TriggerListener - you have to know which jobs are running + and when did last trigger completed, regardless it's success or failed + + private static final String TRIGGER_LISTENER_NAME = "ExclusiveTriggerListener"; + + private final Map jobFireDates = Collections.synchronizedMap(new HashMap<>()); + + private static String getExclusiveJobName(JobExecutionContext jobExecutionContext) { + Scheduler scheduler = jobExecutionContext.getScheduler(); + JobDetail jobDetail = jobExecutionContext.getJobDetail(); + + QuartzJobKey jobKey = extractJobKey(jobDetail); + + if (scheduler != null && jobKey != null) { + JobDataMap params = jobDetail.getJobDataMap(); + String actionId = params.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONID); + Object streamProvider = params.get(QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER); + streamProvider = streamProvider == null ? null : streamProvider.toString(); + Object map = params.get(RESERVEDMAPKEY_PARAMETERS); + Object execPolicy = map instanceof Map ? ((Map) map).get(RESERVEDMAPKEY_EXECPOLICY) : null; + + if (streamProvider != null && EXEC_POLICY_EXCLUSIVE.equals(execPolicy) && + (KETTLE_JOB_ACTIONID.equals(actionId) || KETTLE_TRANS_ACTIONID.equals(actionId))) { + return new StringBuilder() + .append(jobKey.getJobName()).append('.').append(jobKey.getUserName()).toString(); + } + } + + return null; + } + */ + + private static boolean compareParameters(Object map1, Object map2) { + if (!(map1 instanceof Map) || !(map2 instanceof Map)) { + return false; + } + + Map m1 = (Map) map1; + Map m2 = (Map) map2; + + boolean isSame = m1.size() == m2.size(); + + if (isSame) { + for (Object key : m1.keySet()) { + if (!(isSame = Objects.equals(m1.get(key), m2.get(key)))) { + break; + } + } + } + + return isSame; + } + + void applyRule(Scheduler scheduler, JobDetail jobDetail) throws JobExecutionException { + QuartzJobKey jobKey = extractJobKey(jobDetail); + + if (scheduler == null || jobKey == null) { + return; + } + + JobDataMap params = jobDetail.getJobDataMap(); + String actionId = params.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONID); + Object streamProvider = params.get(QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER); + streamProvider = streamProvider == null ? null : streamProvider.toString(); + Object map = params.get(RESERVEDMAPKEY_PARAMETERS); + Object execPolicy = map instanceof Map ? ((Map) map).get(RESERVEDMAPKEY_EXECPOLICY) : null; + + if (streamProvider != null && EXEC_POLICY_EXCLUSIVE.equals(execPolicy) && + (KETTLE_JOB_ACTIONID.equals(actionId) || KETTLE_TRANS_ACTIONID.equals(actionId))) { + List executingJobs; + try { + executingJobs = (List) scheduler.getCurrentlyExecutingJobs(); + } catch (SchedulerException e) { + executingJobs = new ArrayList<>(0); + } + + for (JobExecutionContext ctx : executingJobs) { + JobDetail detail = ctx.getJobDetail(); + if (jobDetail == detail) { // ignore the exact same job + continue; + } + + QuartzJobKey key = extractJobKey(detail); + JobDataMap dataMap = detail.getJobDataMap(); + + if (logger.isDebugEnabled()) { + logger.debug(String.valueOf(ctx) + "\r\nTrigger = [" + ctx.getTrigger() + "]\r\nJobs are equal ? 
" + (jobDetail == detail)); + } + + if (key != null && + actionId.equals(dataMap.getString(QuartzScheduler.RESERVEDMAPKEY_ACTIONID)) && + // FIXME this is tricky but might be better than comparing stream objects + // see https://github.com/pentaho/pentaho-platform/blob/6.1.0.1-R/extensions/src/org/pentaho/platform/web/http/api/resources/RepositoryFileStreamProvider.java + streamProvider.equals(String.valueOf( + dataMap.get(QuartzScheduler.RESERVEDMAPKEY_STREAMPROVIDER))) && + jobKey.getJobName().equals(key.getJobName()) && + jobKey.getUserName().equals(key.getUserName()) && + jobDetail.getGroup().equals(detail.getGroup()) && + compareParameters(map, dataMap.get(RESERVEDMAPKEY_PARAMETERS))) { + String msg = new StringBuilder() + .append("Discard exclusive job [") + .append(jobKey) + .append("] because [") + .append(detail) + .append("] is running") + .toString(); + logger.error(msg); + throw new JobExecutionException(msg); + } + } + } + } + + /* + @Override + public String getName() { + return TRIGGER_LISTENER_NAME; + } + + @Override + public void triggerFired(Trigger trigger, JobExecutionContext jobExecutionContext) { + } + + @Override + public boolean vetoJobExecution(Trigger trigger, JobExecutionContext jobExecutionContext) { + boolean vetoed = false; + + String jobName = getExclusiveJobName(jobExecutionContext); + if (jobName != null) { + Date lastFireTime = jobFireDates.get(jobName); + Date fireTime = jobExecutionContext.getFireTime(); + if (lastFireTime != null && lastFireTime.compareTo(fireTime) < 0) { + jobFireDates.put(jobName, fireTime); + vetoed = true; + if (logger.isWarnEnabled()) { + logger.warn(new StringBuilder() + .append("*** Cancel trigger fired at ") + .append(fireTime) + .append(" as exclusive job[") + .append(jobName).append("] is running since ") + .append(lastFireTime) + .append(" ***").toString()); + } + } + } + + return vetoed; + } + + @Override + public void triggerMisfired(Trigger trigger) { + } + + @Override + public void triggerComplete(Trigger trigger, JobExecutionContext jobExecutionContext, int i) { + String jobName = getExclusiveJobName(jobExecutionContext); + if (jobName != null) { + Date fireTime = jobFireDates.remove(jobName); + if (logger.isInfoEnabled()) { + logger.info(new StringBuilder() + .append("===> Trigger fired at ") + .append(fireTime) + .append(" for exclusive job[") + .append(jobName).append("] is completed (instruction code = ") + .append(i) + .append(" <===").toString()); + } + } + } + */ +} diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java index 536c22f..5c3b4c4 100644 --- a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzScheduler.java @@ -118,6 +118,7 @@ public Scheduler getQuartzScheduler() throws org.quartz.SchedulerException { * us in that regard. 
*/ quartzScheduler = quartzSchedulerFactory.getScheduler(); + QuartzSchedulerHelper.init(quartzScheduler); } logger.debug("Using quartz scheduler " + quartzScheduler); //$NON-NLS-1$ @@ -275,6 +276,9 @@ protected Job createJob(String jobName, Map jobParams, IJo try { Scheduler scheduler = getQuartzScheduler(); + + QuartzSchedulerHelper.applyJobExecutionRules(scheduler, jobDetail); + if (triggerCalendar != null) { scheduler.addCalendar(jobId.toString(), triggerCalendar, false, false); quartzTrigger.setCalendarName(jobId.toString()); @@ -339,6 +343,7 @@ public void updateJob(String jobId, Map jobParams, IJobTri } JobDetail jobDetail = createJobDetails(jobKey, jobParams); + QuartzSchedulerHelper.applyJobExecutionRules(scheduler, jobDetail); scheduler.addJob(jobDetail, true); if (triggerCalendar != null) { scheduler.addCalendar(jobId.toString(), triggerCalendar, true, true); diff --git a/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzSchedulerHelper.java b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzSchedulerHelper.java new file mode 100644 index 0000000..5984528 --- /dev/null +++ b/pentaho-platform/src/main/java/org/pentaho/platform/scheduler2/quartz/QuartzSchedulerHelper.java @@ -0,0 +1,63 @@ +/*! ****************************************************************************** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ +package org.pentaho.platform.scheduler2.quartz; + +import org.quartz.JobDetail; +import org.quartz.JobExecutionException; +import org.quartz.Scheduler; +import org.quartz.SchedulerException; + +/** + * Utility class for QuartzShceduler. + * + * @author Zhichun Wu + */ +public final class QuartzSchedulerHelper { + static final String RESERVEDMAPKEY_PARAMETERS = "parameters"; + static final String RESERVEDMAPKEY_EXECPOLICY = "executionPolicy"; + + static final String EXEC_POLICY_DEFAULT = "Unrestricted"; + static final String EXEC_POLICY_EXCLUSIVE = "Exclusive"; + + static final String KETTLE_JOB_ACTIONID = "kjb.backgroundExecution"; + static final String KETTLE_TRANS_ACTIONID = "ktr.backgroundExecution"; + + static QuartzJobKey extractJobKey(JobDetail jobDetail) { + QuartzJobKey jobKey = null; + try { + jobKey = jobDetail == null ? 
null : QuartzJobKey.parse(jobDetail.getName()); + } catch (org.pentaho.platform.api.scheduler2.SchedulerException e) { + // ignore error + } + + return jobKey; + } + + // http://stackoverflow.com/questions/19733981/quartz-skipping-duplicate-job-fires-scheduled-with-same-fire-time + static void init(Scheduler scheduler) throws SchedulerException { + if (scheduler == null) { + return; + } + + // attached listeners even the scheduler is shutted down + // scheduler.addTriggerListener(ExclusiveKettleJobRule.instance); + } + + // http://stackoverflow.com/questions/2676295/quartz-preventing-concurrent-instances-of-a-job-in-jobs-xml + static void applyJobExecutionRules(Scheduler scheduler, JobDetail jobDetail) throws JobExecutionException { + ExclusiveKettleJobRule.instance.applyRule(scheduler, jobDetail); + } +} From 2dec4c85bb80f59659b2d24c409c8b043a20bc04 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:23:15 +0800 Subject: [PATCH 06/15] Fix stupid NPE --- .../di/repository/pur/LazyUnifiedRepositoryDirectory.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java b/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java index c05dd3d..d8cc6eb 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/repository/pur/LazyUnifiedRepositoryDirectory.java @@ -162,7 +162,8 @@ public List getRepositoryObjects() { RepositoryLock lock = null; try { - lock = lockService.getLock(child); + // No idea why introducing the unnecessary dependency and NPE here... + lock = lockService == null ? null : lockService.getLock(child); RepositoryObjectType objectType = getObjectType(child.getName()); EERepositoryObject repositoryObject = new EERepositoryObject(child, this, null, objectType, null, lock, false); From 67c223ddc42d0f5884e9cd484360cfdea1e63239 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:39:51 +0800 Subject: [PATCH 07/15] Enhance Carte server * Communicate with master time from time * Re-register slave and/or re-import data sources from master when required --- .../main/java/org/pentaho/di/www/Carte.java | 87 +++--- .../org/pentaho/di/www/CarteSingleton.java | 25 +- .../java/org/pentaho/di/www/JaxbList.java | 50 +++ .../org/pentaho/di/www/MasterDetector.java | 294 ++++++++++++++++++ 4 files changed, 394 insertions(+), 62 deletions(-) create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/www/JaxbList.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/www/MasterDetector.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java index 3e1cd74..9a51a6c 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/Carte.java @@ -29,16 +29,15 @@ import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; import com.sun.jersey.api.json.JSONConfiguration; import org.apache.commons.cli.*; -import org.apache.commons.lang.StringUtils; import org.apache.commons.vfs2.FileObject; import org.pentaho.di.cluster.SlaveServer; import org.pentaho.di.core.Const; import org.pentaho.di.core.KettleClientEnvironment; import org.pentaho.di.core.KettleEnvironment; +import org.pentaho.di.core.database.util.DataSourceLocator; import org.pentaho.di.core.logging.KettleLogStore; import 
org.pentaho.di.core.logging.LogChannel; import org.pentaho.di.core.logging.LogChannelInterface; -import org.pentaho.di.core.util.EnvUtil; import org.pentaho.di.core.vfs.KettleVFS; import org.pentaho.di.core.xml.XMLHandler; import org.pentaho.di.i18n.BaseMessages; @@ -50,9 +49,13 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Properties; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; public class Carte { + private final static ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1); + private static Class PKG = Carte.class; // for i18n purposes, needed by Translator2!! private WebServer webServer; @@ -67,9 +70,10 @@ public Carte(final SlaveServerConfig config) throws Exception { public Carte(final SlaveServerConfig config, Boolean joinOverride) throws Exception { this.config = config; - allOK = true; + // allOK = true; CarteSingleton.setSlaveServerConfig(config); + LogChannelInterface log = CarteSingleton.getInstance().getLog(); final TransformationMap transformationMap = CarteSingleton.getInstance().getTransformationMap(); @@ -89,7 +93,7 @@ public Carte(final SlaveServerConfig config, Boolean joinOverride) throws Except } catch (Exception e) { log.logError(BaseMessages.getString(PKG, "Carte.Error.CanNotPartPort", slaveServer.getHostname(), "" + port), e); - allOK = false; + // allOK = false; } } @@ -97,58 +101,23 @@ public Carte(final SlaveServerConfig config, Boolean joinOverride) throws Except // The master might be dead or not alive yet at the time we send this message. // Repeating the registration over and over every few minutes might harden this sort of problems. // - Properties masterProperties = null; - if (config.isReportingToMasters()) { - String propertiesMaster = slaveServer.getPropertiesMasterName(); - for (final SlaveServer master : config.getMasters()) { - // Here we use the username/password specified in the slave server section of the configuration. - // This doesn't have to be the same pair as the one used on the master! - // - try { - SlaveServerDetection slaveServerDetection = new SlaveServerDetection(slaveServer.getClient()); - master.sendXML(slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/"); - log.logBasic("Registered this slave server to master slave server [" + master.toString() + "] on address [" - + master.getServerAndPort() + "]"); - } catch (Exception e) { - log.logError("Unable to register to master slave server [" + master.toString() + "] on address [" + master - .getServerAndPort() + "]"); - allOK = false; - } - try { - if (!StringUtils.isBlank(propertiesMaster) && propertiesMaster.equalsIgnoreCase(master.getName())) { - if (masterProperties != null) { - log.logError("More than one primary master server. 
Master name is " + propertiesMaster); - } else { - masterProperties = master.getKettleProperties(); - log.logBasic("Got properties from master server [" + master.toString() + "], address [" + master - .getServerAndPort() + "]"); - } - } - } catch (Exception e) { - log.logError("Unable to get properties from master server [" + master.toString() + "], address [" + master - .getServerAndPort() + "]"); - allOK = false; - } - } - } - if (masterProperties != null) { - EnvUtil.applyKettleProperties(masterProperties, slaveServer.isOverrideExistingProperties()); - } + // allOK = detector.registerOnMasters(); + // No longer need the following line as we did in Carte constructor // If we need to time out finished or idle objects, we should create a timer in the background to clean // this is done automatically now // CarteSingleton.installPurgeTimer(config, log, transformationMap, jobMap); - if (allOK) { - boolean shouldJoin = config.isJoining(); - if (joinOverride != null) { - shouldJoin = joinOverride; - } - - this.webServer = - new WebServer(log, transformationMap, jobMap, socketRepository, detections, hostname, port, shouldJoin, - config.getPasswordFile(), slaveServer.getSslConfig()); + // if (allOK) { + boolean shouldJoin = config.isJoining(); + if (joinOverride != null) { + shouldJoin = joinOverride; } + + this.webServer = + new WebServer(log, transformationMap, jobMap, socketRepository, detections, hostname, port, shouldJoin, + config.getPasswordFile(), slaveServer.getSslConfig()); + // } } public static void main(String[] args) { @@ -156,6 +125,7 @@ public static void main(String[] args) { parseAndRunCommand(args); } catch (Exception e) { e.printStackTrace(); + scheduler.shutdown(); } } @@ -231,6 +201,10 @@ private static void parseAndRunCommand(String[] args) throws Exception { private static void setKettleEnvironment() throws Exception { KettleClientEnvironment.getInstance().setClient(KettleClientEnvironment.ClientType.CARTE); KettleEnvironment.init(); + + // http://forums.pentaho.com/showthread.php?156592-Kettle-5-0-1-Log4j-plugin-usage + // LoggingBuffer loggingBuffer = KettleLogStore.getAppender(); + // loggingBuffer.addLoggingEventListener(new Log4jLogging()); } public static void runCarte(SlaveServerConfig config) throws Exception { @@ -238,9 +212,18 @@ public static void runCarte(SlaveServerConfig config) throws Exception { config.setJoining(true); + MasterDetector detector = MasterDetector.instance; + DataSourceLocator.activate(); + Carte carte = new Carte(config, false); CarteSingleton.setCarte(carte); + // register first + detector.registerOnMasters(); + // and then enter the loop to check and re-register as required + scheduler.scheduleWithFixedDelay(detector, detector.getInitialDelay(), + detector.getRefreshInterval(), TimeUnit.MILLISECONDS); + carte.getWebServer().join(); } @@ -310,7 +293,9 @@ private static String stripOff(String target, String strip) { private static void shutdown(String hostname, String port, String username, String password) { try { + DataSourceLocator.deactivate(); callStopCarteRestService(hostname, port, username, password); + scheduler.shutdown(); } catch (Exception e) { e.printStackTrace(); } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java index e839cfa..7a2abb0 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/CarteSingleton.java @@ -82,28 +82,31 @@ private 
CarteSingleton(SlaveServerConfig config) throws KettleException { // Repeating the registration over and over every few minutes might // harden this sort of problems. // - if (config.isReportingToMasters()) { + + /* sorry MasterDetector will take care of the following + if ( config.isReportingToMasters() ) { String hostname = slaveServer.getHostname(); final SlaveServer client = - new SlaveServer("Dynamic slave [" + hostname + ":" + port + "]", hostname, "" + port, slaveServer - .getUsername(), slaveServer.getPassword()); - for (final SlaveServer master : config.getMasters()) { + new SlaveServer( "Dynamic slave [" + hostname + ":" + port + "]", hostname, "" + port, slaveServer + .getUsername(), slaveServer.getPassword() ); + for ( final SlaveServer master : config.getMasters() ) { // Here we use the username/password specified in the slave // server section of the configuration. // This doesn't have to be the same pair as the one used on the // master! // try { - SlaveServerDetection slaveServerDetection = new SlaveServerDetection(client); - master.sendXML(slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/"); - log.logBasic("Registered this slave server to master slave server [" - + master.toString() + "] on address [" + master.getServerAndPort() + "]"); - } catch (Exception e) { - log.logError("Unable to register to master slave server [" - + master.toString() + "] on address [" + master.getServerAndPort() + "]"); + SlaveServerDetection slaveServerDetection = new SlaveServerDetection( client ); + master.sendXML( slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/" ); + log.logBasic( "Registered this slave server to master slave server [" + + master.toString() + "] on address [" + master.getServerAndPort() + "]" ); + } catch ( Exception e ) { + log.logError( "Unable to register to master slave server [" + + master.toString() + "] on address [" + master.getServerAndPort() + "]" ); } } } + */ } } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/JaxbList.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/JaxbList.java new file mode 100644 index 0000000..b89dd59 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/JaxbList.java @@ -0,0 +1,50 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ +package org.pentaho.di.www; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.List; + +@XmlRootElement(name = "List") +@XmlAccessorType(XmlAccessType.FIELD) +public class JaxbList { + @XmlElement(name = "Item") + protected List list; + + public JaxbList() { + } + + public JaxbList(List list) { + this.list = list; + } + + public List getList() { + return list; + } + + public void setList(List list) { + this.list = list; + } +} \ No newline at end of file diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/MasterDetector.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/MasterDetector.java new file mode 100644 index 0000000..18a3c66 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/MasterDetector.java @@ -0,0 +1,294 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ +package org.pentaho.di.www; + +import org.apache.commons.lang.StringUtils; +import org.pentaho.database.model.DatabaseConnection; +import org.pentaho.database.model.IDatabaseConnection; +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.database.util.DataSourceLocator; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.util.EnvUtil; + +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Unmarshaller; +import java.io.StringReader; +import java.util.*; + +/** + * This class is responsible for communicating with master node, BA server in this case. + * It helps to re-register slave or re-import data sources as required(i.e. master restarted). 
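Note: MasterDetector (below) unmarshals the master's data-source listing into the JaxbList wrapper defined above. A self-contained example of that JAXB round trip with a trivial wrapper follows; the element names match JaxbList, but the sample payload values are illustrative, not the BA server response.

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Unmarshaller;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import java.io.StringReader;
import java.util.List;

public final class JaxbListDemo {

    @XmlRootElement(name = "List")
    @XmlAccessorType(XmlAccessType.FIELD)
    public static class NameList {
        @XmlElement(name = "Item")
        public List<String> items;
    }

    public static void main(String[] args) throws Exception {
        String xml = "<List><Item>SampleData</Item><Item>Quartz</Item></List>";
        Unmarshaller u = JAXBContext.newInstance(NameList.class).createUnmarshaller();
        NameList list = (NameList) u.unmarshal(new StringReader(xml));
        System.out.println(list.items);   // prints [SampleData, Quartz]
    }
}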
+ * + * @author Zhichun Wu + */ +final class MasterDetector implements Runnable { + static final class MasterServerStatus { + boolean alive = false; + boolean restarted = false; + long uptime = 0L; + String dslist = EMPTY_STRING; + } + + final static String PROP_INITIAL_DELAY = "KETTLE_MASTER_DETECTOR_INITIAL_DELAY"; + final static String PROP_REFRESH_INTERVAL = "KETTLE_MASTER_DETECTOR_REFRESH_INTERVAL"; + + final static long DEFAULT_INITIAL_DELAY = 1 * 1000L; // 1 seconds + final static long DEFAULT_REFRESH_INTERVAL = 10 * 1000L; // 10 seconds + + final static String PATH_QUERY_STATUS = "/kettle/status?xml=Y"; + final static String PATH_QUERY_SLAVES = "/kettle/getSlaves?xml=Y"; + // this is not build-in API like above in Kettle but comes from BA server + final static String PATH_QUERY_DS = "/plugin/data-access/api/datasource/jdbc/connection"; + + final static String TAG_UPTIME_BEGIN = ""; + final static String TAG_UPTIME_END = ""; + + final static String TAG_HOST_BEGIN = ""; + final static String TAG_HOST_END = ""; + final static String TAG_PORT_BEGIN = ""; + final static String TAG_PORT_END = ""; + + final static String EMPTY_STRING = ""; + + final static MasterDetector instance = new MasterDetector(); + + private final long initialDelay; + private final long refreshInterval; + private final JAXBContext jaxbContext; + private final Map masterStatus + = Collections.synchronizedMap(new HashMap(3)); + + private MasterDetector() { + JAXBContext context = null; + try { + context = JAXBContext.newInstance(JaxbList.class, DatabaseConnection.class); + } catch (JAXBException e) { + e.printStackTrace(); + } finally { + jaxbContext = context; + } + + this.initialDelay = Const.toLong(EnvUtil.getSystemProperty(PROP_INITIAL_DELAY), DEFAULT_INITIAL_DELAY); + this.refreshInterval = Const.toLong(EnvUtil.getSystemProperty(PROP_REFRESH_INTERVAL), DEFAULT_REFRESH_INTERVAL); + } + + private void checkConfig() { + Carte carte = CarteSingleton.getCarte(); + SlaveServerConfig config = carte == null ? null : carte.getConfig(); + List masters = config == null ? 
null : config.getMasters(); + if (config == null || masters == null || masters.size() == 0) { + throw new NullPointerException("At least one master is required for the cluster"); + } + + if (!config.isReportingToMasters()) { + throw new IllegalStateException("Only slave server can register on master nodes"); + } + } + + private void checkMasterRegistrion() { + checkConfig(); + + SlaveServerConfig config = CarteSingleton.getCarte().getConfig(); + SlaveServer slaveServer = config.getSlaveServer(); + + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + + for (final SlaveServer master : config.getMasters()) { + String name = master.getName(); + MasterServerStatus status = masterStatus.get(name); + + if (status == null || !status.alive) { + // this should never happen + log.logError(new StringBuilder().append("Skip master ") + .append(name).append(" as it seems dead").toString()); + continue; + } + + String tag = new StringBuilder().append(TAG_HOST_BEGIN) + .append(slaveServer.getHostname()).append(TAG_HOST_END).append(TAG_PORT_BEGIN) + .append(slaveServer.getPort()).append(TAG_PORT_END).toString(); + + try { + String xml = master.sendXML(EMPTY_STRING, PATH_QUERY_SLAVES); + if (xml != null && (status.restarted || xml.indexOf(tag) < 0)) { + registerOnMasters(master); + } + } catch (Throwable t) { + log.logError("Failed to check slaves of master " + name + ": " + t.getMessage()); + status.alive = false; + continue; + } + + // now check data sources + Map headerValues = new HashMap(1); + headerValues.put("Accept", "application/xml"); + String dslist = EMPTY_STRING; + try { + // no retry + dslist = master.execService(PATH_QUERY_DS, headerValues); + if (dslist != null && (!status.dslist.equals(dslist) || status.restarted)) { + Unmarshaller jaxbUnmarshaller = jaxbContext.createUnmarshaller(); + JaxbList list = (JaxbList) jaxbUnmarshaller.unmarshal(new StringReader(dslist)); + Map mapping + = new HashMap(list.getList().size()); + for (String s : list.getList()) { + String ds = master.execService( + new StringBuilder().append(PATH_QUERY_DS).append('/').append(s).toString(), + headerValues); + mapping.put(s, (IDatabaseConnection) jaxbUnmarshaller.unmarshal(new StringReader(ds))); + } + + log.logBasic( + new StringBuilder().append(DataSourceLocator.updateDataSourceMappings(mapping)) + .append(" of ").append(mapping.size()).append(" data sources imported from master ") + .append(master.getName()).toString()); + + status.dslist = dslist; + } + // log.logBasic(xml); + } catch (Throwable t) { + log.logError("Failed to check data sources of master " + name + ": " + t.getMessage()); + // status.alive = false; + } + } + } + + private void checkMasterStatus() { + checkConfig(); + + SlaveServerConfig config = CarteSingleton.getCarte().getConfig(); + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + + for (final SlaveServer master : config.getMasters()) { + String name = master.getName(); + MasterServerStatus status = masterStatus.get(name); + if (status == null) { + status = new MasterServerStatus(); + masterStatus.put(name, status); + } + + int startIndex = 0; + long uptime = 0L; + try { + String xml = master.sendXML(EMPTY_STRING, PATH_QUERY_STATUS); + + startIndex = xml == null ? 
-1 : xml.indexOf(TAG_UPTIME_BEGIN); + + if (startIndex > 0) { + startIndex = startIndex + TAG_UPTIME_BEGIN.length(); + int endIndex = xml.indexOf(TAG_UPTIME_END, startIndex); + if (endIndex > startIndex) { + uptime = Long.parseLong(xml.substring(startIndex, endIndex)); + } + } + + log.logDebug(new StringBuilder().append(name).append(':').append(uptime) + .append('(').append(uptime > status.uptime).append(')').toString()); + } catch (Throwable t) { + log.logError("Failed to get status of master " + name + ": " + t.getMessage()); + } finally { + status.alive = startIndex > 0; + status.restarted = uptime > 0L && status.uptime > 0L && uptime < status.uptime; + if (uptime > 0L) { + status.uptime = uptime; + } + } + } + } + + long getInitialDelay() { + return this.initialDelay; + } + + long getRefreshInterval() { + return this.refreshInterval; + } + + boolean registerOnMasters(SlaveServer... masters) { + checkConfig(); + + boolean allOK = true; + + SlaveServerConfig config = CarteSingleton.getCarte().getConfig(); + SlaveServer slaveServer = config.getSlaveServer(); + masters = masters == null || masters.length == 0 + ? config.getMasters().toArray(new SlaveServer[config.getMasters().size()]) : masters; + + LogChannelInterface log = CarteSingleton.getInstance().getLog(); + + Properties masterProperties = null; + String propertiesMaster = slaveServer.getPropertiesMasterName(); + for (final SlaveServer master : masters) { + // Here we use the username/password specified in the slave server section of the configuration. + // This doesn't have to be the same pair as the one used on the master! + // + try { + SlaveServerDetection slaveServerDetection = new SlaveServerDetection(slaveServer.getClient()); + master.sendXML(slaveServerDetection.getXML(), RegisterSlaveServlet.CONTEXT_PATH + "/"); + log.logBasic("Registered this slave server to master slave server [" + master.toString() + "] on address [" + + master.getServerAndPort() + "]"); + } catch (Exception e) { + log.logError("Unable to register to master slave server [" + master.toString() + "] on address [" + master + .getServerAndPort() + "]"); + allOK = false; + } + try { + if (!StringUtils.isBlank(propertiesMaster) && propertiesMaster.equalsIgnoreCase(master.getName())) { + if (masterProperties != null) { + log.logError("More than one primary master server. 
Master name is " + propertiesMaster); + } else { + masterProperties = master.getKettleProperties(); + log.logBasic("Got properties from master server [" + master.toString() + "], address [" + master + .getServerAndPort() + "]"); + } + } + } catch (Exception e) { + log.logError("Unable to get properties from master server [" + master.toString() + "], address [" + master + .getServerAndPort() + "]"); + allOK = false; + } + } + + if (masterProperties != null) { + EnvUtil.applyKettleProperties(masterProperties, slaveServer.isOverrideExistingProperties()); + } + + return allOK; + } + + @Override + public void run() { + try { + // first check if the masters are alive(and their uptime) - yes, we may have more than one master + checkMasterStatus(); + + // and then check if this slave has been registered in all these masters + // in case it's not(ex: master restarted), register the salve again + // lastly, re-import data sources if there's any change + checkMasterRegistrion(); + } catch (Exception e) { + // do NOT throw exception here or the scheduled task will stop running + } + } +} From 89f0166f954fe8c42f9a61817f034a8cbd784f34 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:43:12 +0800 Subject: [PATCH 08/15] Add formatted code from Pentaho 6.1.0.1-196 --- .../di/core/database/DatabaseMeta.java | 2856 +++++++++++++++++ .../org/pentaho/di/www/GetStatusServlet.java | 459 +++ 2 files changed, 3315 insertions(+) create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/core/database/DatabaseMeta.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/core/database/DatabaseMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/DatabaseMeta.java new file mode 100644 index 0000000..1a497e0 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/core/database/DatabaseMeta.java @@ -0,0 +1,2856 @@ +// CHECKSTYLE:FileLength:OFF +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + ******************************************************************************/ + +package org.pentaho.di.core.database; + +import org.pentaho.di.core.Const; +import org.pentaho.di.core.RowMetaAndData; +import org.pentaho.di.core.encryption.Encr; +import org.pentaho.di.core.exception.KettleDatabaseException; +import org.pentaho.di.core.exception.KettlePluginException; +import org.pentaho.di.core.exception.KettleValueException; +import org.pentaho.di.core.exception.KettleXMLException; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.plugins.DatabasePluginType; +import org.pentaho.di.core.plugins.PluginInterface; +import org.pentaho.di.core.plugins.PluginRegistry; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.ValueMetaInterface; +import org.pentaho.di.core.row.value.ValueMetaBase; +import org.pentaho.di.core.row.value.ValueMetaString; +import org.pentaho.di.core.util.ExecutorUtil; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.variables.Variables; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.core.xml.XMLInterface; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.*; +import org.pentaho.di.shared.SharedObjectBase; +import org.pentaho.di.shared.SharedObjectInterface; +import org.w3c.dom.Node; + +import java.sql.ResultSet; +import java.util.*; +import java.util.concurrent.Callable; +import java.util.concurrent.Future; + +/** + * This class defines the database specific parameters for a certain database type. It also provides static information + * regarding a number of well known databases. + * + * @author Matt + * @since 18-05-2003 + */ +public class DatabaseMeta extends SharedObjectBase implements Cloneable, XMLInterface, SharedObjectInterface, + VariableSpace, RepositoryElementInterface { + private static Class PKG = Database.class; // for i18n purposes, needed by Translator2!! + + public static final String XML_TAG = "connection"; + + public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.DATABASE; + + // Comparator for sorting databases alphabetically by name + public static final Comparator comparator = new Comparator() { + @Override + public int compare(DatabaseMeta dbm1, DatabaseMeta dbm2) { + return dbm1.getName().compareToIgnoreCase(dbm2.getName()); + } + }; + + private DatabaseInterface databaseInterface; + + private static volatile Future> allDatabaseInterfaces; + + static { + PluginRegistry.getInstance().addPluginListener(DatabasePluginType.class, + new org.pentaho.di.core.plugins.PluginTypeListener() { + @Override + public void pluginAdded(Object serviceObject) { + clearDatabaseInterfacesMap(); + } + + @Override + public void pluginRemoved(Object serviceObject) { + clearDatabaseInterfacesMap(); + + } + + @Override + public void pluginChanged(Object serviceObject) { + clearDatabaseInterfacesMap(); + } + }); + } + + private VariableSpace variables = new Variables(); + + private ObjectRevision objectRevision; + + private boolean readOnly = false; + + /** + * Indicates that the connections doesn't point to a type of database yet. 
+ * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_NONE = 0; + + /** + * Connection to a MySQL database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_MYSQL = 1; + + /** + * Connection to an Oracle database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_ORACLE = 2; + + /** + * Connection to an AS/400 (IBM iSeries) DB400 database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_AS400 = 3; + + /** + * Connection to an Microsoft Access database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_ACCESS = 4; + + /** + * Connection to a Microsoft SQL Server database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_MSSQL = 5; + + /** + * Connection to an IBM DB2 database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_DB2 = 6; + + /** + * Connection to a PostgreSQL database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_POSTGRES = 7; + + /** + * Connection to an Intersystems Cache database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_CACHE = 8; + + /** + * Connection to an IBM Informix database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_INFORMIX = 9; + + /** + * Connection to a Sybase ASE database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_SYBASE = 10; + + /** + * Connection to a Gupta SQLBase database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_GUPTA = 11; + + /** + * Connection to a DBase III/IV/V database through JDBC + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_DBASE = 12; + + /** + * Connection to a FireBird database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_FIREBIRD = 13; + + /** + * Connection to a SAP DB database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_SAPDB = 14; + + /** + * Connection to a Hypersonic java database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_HYPERSONIC = 15; + + /** + * Connection to a generic database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_GENERIC = 16; + + /** + * Connection to an SAP R/3 system + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_SAPR3 = 17; + + /** + * Connection to an Ingress database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_INGRES = 18; + + /** + * Connection to a Borland Interbase database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_INTERBASE = 19; + + /** + * Connection to an ExtenDB database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_EXTENDB = 20; + + /** + * Connection to a Teradata database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_TERADATA = 21; + + /** + * Connection to an Oracle RDB database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_ORACLE_RDB = 22; + + /** + * Connection to an H2 database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_H2 = 23; + + /** + * Connection to a Netezza database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_NETEZZA = 24; + + /** + * Connection to an IBM UniVerse database + * + * 
@deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_UNIVERSE = 25; + + /** + * Connection to a SQLite database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_SQLITE = 26; + + /** + * Connection to an Apache Derby database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_DERBY = 27; + + /** + * Connection to a BMC Remedy Action Request System + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_REMEDY_AR_SYSTEM = 28; + + /** + * Connection to a Palo MOLAP Server + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_PALO = 29; + + /** + * Connection to a SybaseIQ ASE database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_SYBASEIQ = 30; + + /** + * Connection to a Greenplum database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_GREENPLUM = 31; + + /** + * Connection to a MonetDB database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_MONETDB = 32; + + /** + * Connection to a KingbaseES database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_KINGBASEES = 33; + + /** + * Connection to a Vertica database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_VERTICA = 34; + + /** + * Connection to a Neoview database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_NEOVIEW = 35; + + /** + * Connection to a LucidDB database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_LUCIDDB = 36; + + /** + * Connection to an Infobright database + * + * @deprecated + */ + @Deprecated + public static final int TYPE_DATABASE_INFOBRIGHT = 37; + + /** + * Connect natively through JDBC thin driver to the database. + */ + public static final int TYPE_ACCESS_NATIVE = 0; + + /** + * Connect to the database using ODBC. + */ + public static final int TYPE_ACCESS_ODBC = 1; + + /** + * Connect to the database using OCI. (Oracle only) + */ + public static final int TYPE_ACCESS_OCI = 2; + + /** + * Connect to the database using plugin specific method. (SAP ERP) + */ + public static final int TYPE_ACCESS_PLUGIN = 3; + + /** + * Connect to the database using JNDI. + */ + public static final int TYPE_ACCESS_JNDI = 4; + + /** + * Short description of the access type, used in XML and the repository. + */ + public static final String[] dbAccessTypeCode = {"Native", "ODBC", "OCI", "Plugin", "JNDI", ",",}; + + /** + * Longer description for user interactions. + */ + public static final String[] dbAccessTypeDesc = { + "Native (JDBC)", "ODBC", "OCI", "Plugin specific access method", "JNDI", "Custom",}; + + /** + * Use this length in a String value to indicate that you want to use a CLOB in stead of a normal text field. + */ + public static final int CLOB_LENGTH = 9999999; + + /** + * The value to store in the attributes so that an empty value doesn't get lost... + */ + public static final String EMPTY_OPTIONS_STRING = "><"; + + /** + * Construct a new database connections. Note that not all these parameters are not always mandatory. + * + * @param name The database name + * @param type The type of database + * @param access The type of database access + * @param host The hostname or IP address + * @param db The database name + * @param port The port on which the database listens. 
+ * @param user The username + * @param pass The password + */ + public DatabaseMeta(String name, String type, String access, String host, String db, String port, String user, + String pass) { + setValues(name, type, access, host, db, port, user, pass); + addOptions(); + } + + /** + * Create an empty database connection + */ + public DatabaseMeta() { + setDefault(); + addOptions(); + } + + /** + * Set default values for an Oracle database. + */ + public void setDefault() { + setValues("", "Oracle", "Native", "", "", "1521", "", ""); + } + + /** + * Add a list of common options for some databases. + */ + public void addOptions() { + PluginInterface mySqlPlugin = PluginRegistry.getInstance().getPlugin(DatabasePluginType.class, "MYSQL"); + PluginInterface infoBrightPlugin = + PluginRegistry.getInstance().getPlugin(DatabasePluginType.class, new InfobrightDatabaseMeta()); + + String mySQL = mySqlPlugin.getIds()[0]; + + addExtraOption(mySQL, "defaultFetchSize", "500"); + addExtraOption(mySQL, "useCursorFetch", "true"); + + String infoBright = infoBrightPlugin.getIds()[0]; + + addExtraOption(infoBright, "characterEncoding", "UTF-8"); + + // Modern databases support this, try it by default... + // + setSupportsBooleanDataType(true); + setSupportsTimestampDataType(true); + } + + /** + * @return the system dependend database interface for this database metadata definition + */ + public DatabaseInterface getDatabaseInterface() { + return databaseInterface; + } + + /** + * Set the system dependend database interface for this database metadata definition + * + * @param databaseInterface the system dependend database interface + */ + public void setDatabaseInterface(DatabaseInterface databaseInterface) { + this.databaseInterface = databaseInterface; + } + + /** + * Search for the right type of DatabaseInterface object and clone it. + * + * @param databaseType the type of DatabaseInterface to look for (description) + * @return The requested DatabaseInterface + * @throws KettleDatabaseException when the type could not be found or referenced. + */ + public static final DatabaseInterface getDatabaseInterface(String databaseType) throws KettleDatabaseException { + DatabaseInterface di = findDatabaseInterface(databaseType); + if (di == null) { + throw new KettleDatabaseException(BaseMessages.getString( + PKG, "DatabaseMeta.Error.DatabaseInterfaceNotFound", databaseType)); + } + return (DatabaseInterface) di.clone(); + } + + /** + * Search for the right type of DatabaseInterface object and return it. + * + * @param databaseTypeDesc the type of DatabaseInterface to look for (id or description) + * @return The requested DatabaseInterface + * @throws KettleDatabaseException when the type could not be found or referenced. + */ + private static final DatabaseInterface findDatabaseInterface(String databaseTypeDesc) throws KettleDatabaseException { + PluginRegistry registry = PluginRegistry.getInstance(); + PluginInterface plugin = registry.getPlugin(DatabasePluginType.class, databaseTypeDesc); + if (plugin == null) { + plugin = registry.findPluginWithName(DatabasePluginType.class, databaseTypeDesc); + } + + if (plugin == null) { + throw new KettleDatabaseException("database type with plugin id [" + + databaseTypeDesc + "] couldn't be found!"); + } + + return getDatabaseInterfacesMap().get(plugin.getIds()[0]); + } + + /** + * Returns the database ID of this database connection if a repository was used before. + * + * @return the ID of the db connection. 
+ */ + @Override + public ObjectId getObjectId() { + return databaseInterface.getObjectId(); + } + + @Override + public void setObjectId(ObjectId id) { + databaseInterface.setObjectId(id); + } + + @Override + public Object clone() { + DatabaseMeta databaseMeta = new DatabaseMeta(); + databaseMeta.replaceMeta(this); + databaseMeta.setObjectId(null); + return databaseMeta; + } + + public void replaceMeta(DatabaseMeta databaseMeta) { + this.setValues( + databaseMeta.getName(), databaseMeta.getPluginId(), databaseMeta.getAccessTypeDesc(), databaseMeta + .getHostname(), databaseMeta.getDatabaseName(), databaseMeta.getDatabasePortNumberString(), + databaseMeta.getUsername(), databaseMeta.getPassword()); + this.setServername(databaseMeta.getServername()); + this.setDataTablespace(databaseMeta.getDataTablespace()); + this.setIndexTablespace(databaseMeta.getIndexTablespace()); + + this.databaseInterface = (DatabaseInterface) databaseMeta.databaseInterface.clone(); + + this.setObjectId(databaseMeta.getObjectId()); + this.setChanged(); + } + + public void setValues(String name, String type, String access, String host, String db, String port, + String user, String pass) { + try { + databaseInterface = getDatabaseInterface(type); + } catch (KettleDatabaseException kde) { + throw new RuntimeException("Database type not found!", kde); + } + + setName(name); + setAccessType(getAccessType(access)); + setHostname(host); + setDBName(db); + setDBPort(port); + setUsername(user); + setPassword(pass); + setServername(null); + setChanged(false); + } + + public void setDatabaseType(String type) { + DatabaseInterface oldInterface = databaseInterface; + + try { + databaseInterface = getDatabaseInterface(type); + } catch (KettleDatabaseException kde) { + throw new RuntimeException("Database type [" + type + "] not found!", kde); + } + + setName(oldInterface.getName()); + setDisplayName(oldInterface.getDisplayName()); + setAccessType(oldInterface.getAccessType()); + setHostname(oldInterface.getHostname()); + setDBName(oldInterface.getDatabaseName()); + setDBPort(oldInterface.getDatabasePortNumberString()); + setUsername(oldInterface.getUsername()); + setPassword(oldInterface.getPassword()); + setServername(oldInterface.getServername()); + setDataTablespace(oldInterface.getDataTablespace()); + setIndexTablespace(oldInterface.getIndexTablespace()); + setChanged(oldInterface.isChanged()); + } + + public void setValues(DatabaseMeta info) { + databaseInterface = (DatabaseInterface) info.databaseInterface.clone(); + } + + /** + * Sets the name of the database connection. This name should be unique in a transformation and in general in a single + * repository. + * + * @param name The name of the database connection + */ + @Override + public void setName(String name) { + databaseInterface.setName(name); + } + + /** + * Returns the name of the database connection + * + * @return The name of the database connection + */ + @Override + public String getName() { + return databaseInterface.getName(); + } + + public void setDisplayName(String displayName) { + databaseInterface.setDisplayName(displayName); + } + + /** + * Returns the name of the database connection + * + * @return The name of the database connection + */ + public String getDisplayName() { + return databaseInterface.getDisplayName(); + } + + /** + * Returns the type of database, one of + *

+ * TYPE_DATABASE_MYSQL
+ * TYPE_DATABASE_ORACLE
+ * TYPE_DATABASE_...
+ * + * @return the database type + * @Deprecated public int getDatabaseType() { return databaseInterface.getDatabaseType(); } + */ + + /** + * The plugin ID of the database interface + */ + public String getPluginId() { + return databaseInterface.getPluginId(); + } + + /* + * Sets the type of database. + * + * @param db_type The database type public void setDatabaseType(int db_type) { databaseInterface this.databaseType = + * db_type; } + */ + + /** + * Return the type of database access. One of + *

+ * TYPE_ACCESS_NATIVE
+ * TYPE_ACCESS_ODBC
+ * TYPE_ACCESS_OCI
+ * + * @return The type of database access. + */ + public int getAccessType() { + return databaseInterface.getAccessType(); + } + + /** + * Set the type of database access. + * + * @param access_type The access type. + */ + public void setAccessType(int access_type) { + databaseInterface.setAccessType(access_type); + } + + /** + * Returns a short description of the type of database. + * + * @return A short description of the type of database. + * @deprecated This is actually the plugin ID + */ + @Deprecated + public String getDatabaseTypeDesc() { + return getPluginId(); + } + + /** + * Gets you a short description of the type of database access. + * + * @return A short description of the type of database access. + */ + public String getAccessTypeDesc() { + return dbAccessTypeCode[getAccessType()]; + } + + /** + * Return the hostname of the machine on which the database runs. + * + * @return The hostname of the database. + */ + public String getHostname() { + return databaseInterface.getHostname(); + } + + /** + * Sets the hostname of the machine on which the database runs. + * + * @param hostname The hostname of the machine on which the database runs. + */ + public void setHostname(String hostname) { + databaseInterface.setHostname(hostname); + } + + /** + * Return the port on which the database listens as a String. Allows for parameterisation. + * + * @return The database port. + */ + public String getDatabasePortNumberString() { + return databaseInterface.getDatabasePortNumberString(); + } + + /** + * Sets the port on which the database listens. + * + * @param db_port The port number on which the database listens + */ + public void setDBPort(String db_port) { + databaseInterface.setDatabasePortNumberString(db_port); + } + + /** + * Return the name of the database. + * + * @return The database name. + */ + public String getDatabaseName() { + return databaseInterface.getDatabaseName(); + } + + /** + * Set the name of the database. + * + * @param databaseName The new name of the database + */ + public void setDBName(String databaseName) { + databaseInterface.setDatabaseName(databaseName); + } + + /** + * Get the username to log into the database on this connection. + * + * @return The username to log into the database on this connection. + */ + public String getUsername() { + return databaseInterface.getUsername(); + } + + /** + * Sets the username to log into the database on this connection. + * + * @param username The username + */ + public void setUsername(String username) { + databaseInterface.setUsername(username); + } + + /** + * Get the password to log into the database on this connection. + * + * @return the password to log into the database on this connection. + */ + public String getPassword() { + return databaseInterface.getPassword(); + } + + /** + * Sets the password to log into the database on this connection. + * + * @param password the password to log into the database on this connection. 
+ */ + public void setPassword(String password) { + databaseInterface.setPassword(password); + } + + /** + * @param servername the Informix servername + */ + public void setServername(String servername) { + databaseInterface.setServername(servername); + } + + /** + * @return the Informix servername + */ + public String getServername() { + return databaseInterface.getServername(); + } + + public String getDataTablespace() { + return databaseInterface.getDataTablespace(); + } + + public void setDataTablespace(String data_tablespace) { + databaseInterface.setDataTablespace(data_tablespace); + } + + public String getIndexTablespace() { + return databaseInterface.getIndexTablespace(); + } + + public void setIndexTablespace(String index_tablespace) { + databaseInterface.setIndexTablespace(index_tablespace); + } + + public void setChanged() { + setChanged(true); + } + + public void setChanged(boolean ch) { + databaseInterface.setChanged(ch); + } + + public boolean hasChanged() { + return databaseInterface.isChanged(); + } + + public void clearChanged() { + databaseInterface.setChanged(false); + } + + @Override + public String toString() { + return getDisplayName(); + } + + /** + * @return The extra attributes for this database connection + */ + public Properties getAttributes() { + return databaseInterface.getAttributes(); + } + + /** + * Set extra attributes on this database connection + * + * @param attributes The extra attributes to set on this database connection. + */ + public void setAttributes(Properties attributes) { + databaseInterface.setAttributes(attributes); + } + + /** + * Constructs a new database using an XML string snippet. It expects the snippet to be enclosed in + * connection tags. + * + * @param xml The XML string to parse + * @throws KettleXMLException in case there is an XML parsing error + */ + public DatabaseMeta(String xml) throws KettleXMLException { + this(XMLHandler.getSubNode(XMLHandler.loadXMLString(xml), "connection")); + } + + /** + * Reads the information from an XML Node into this new database connection. + * + * @param con The Node to read the data from + * @throws KettleXMLException + */ + public DatabaseMeta(Node con) throws KettleXMLException { + this(); + + try { + String type = XMLHandler.getTagValue(con, "type"); + try { + databaseInterface = getDatabaseInterface(type); + + } catch (KettleDatabaseException kde) { + throw new KettleXMLException("Unable to create new database interface", kde); + } + + setName(XMLHandler.getTagValue(con, "name")); + setDisplayName(getName()); + setHostname(XMLHandler.getTagValue(con, "server")); + String acc = XMLHandler.getTagValue(con, "access"); + setAccessType(getAccessType(acc)); + + setDBName(XMLHandler.getTagValue(con, "database")); + + // The DB port is read here too for backward compatibility! getName() + // + setDBPort(XMLHandler.getTagValue(con, "port")); + setUsername(XMLHandler.getTagValue(con, "username")); + setPassword(Encr.decryptPasswordOptionallyEncrypted(XMLHandler.getTagValue(con, "password"))); + setServername(XMLHandler.getTagValue(con, "servername")); + setDataTablespace(XMLHandler.getTagValue(con, "data_tablespace")); + setIndexTablespace(XMLHandler.getTagValue(con, "index_tablespace")); + + setReadOnly(Boolean.valueOf(XMLHandler.getTagValue(con, "read_only"))); + + // Also, read the database attributes... 
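+      // Illustrative sketch (not from the original source) of the <attributes> block parsed below,
+      // assuming the EXTRA_OPTION_<plugin>.<option> code convention used by addExtraOption();
+      // the option name and value here are made up:
+      //
+      //   <attributes>
+      //     <attribute>
+      //       <code>EXTRA_OPTION_MYSQL.defaultFetchSize</code>
+      //       <attribute>500</attribute>
+      //     </attribute>
+      //   </attributes>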
+ Node attrsnode = XMLHandler.getSubNode(con, "attributes"); + if (attrsnode != null) { + List attrnodes = XMLHandler.getNodes(attrsnode, "attribute"); + for (Node attrnode : attrnodes) { + String code = XMLHandler.getTagValue(attrnode, "code"); + String attribute = XMLHandler.getTagValue(attrnode, "attribute"); + if (code != null && attribute != null) { + getAttributes().put(code, attribute); + } + getDatabasePortNumberString(); + } + } + } catch (Exception e) { + throw new KettleXMLException("Unable to load database connection info from XML node", e); + } + } + + @Override + public String getXML() { + StringBuffer retval = new StringBuffer(250); + + retval.append(" <").append(XML_TAG).append('>').append(Const.CR); + retval.append(" ").append(XMLHandler.addTagValue("name", getName())); + retval.append(" ").append(XMLHandler.addTagValue("server", getHostname())); + retval.append(" ").append(XMLHandler.addTagValue("type", getPluginId())); + retval.append(" ").append(XMLHandler.addTagValue("access", getAccessTypeDesc())); + retval.append(" ").append(XMLHandler.addTagValue("database", getDatabaseName())); + retval.append(" ").append(XMLHandler.addTagValue("port", getDatabasePortNumberString())); + retval.append(" ").append(XMLHandler.addTagValue("username", getUsername())); + retval.append(" ").append( + XMLHandler.addTagValue("password", Encr.encryptPasswordIfNotUsingVariables(getPassword()))); + retval.append(" ").append(XMLHandler.addTagValue("servername", getServername())); + retval.append(" ").append(XMLHandler.addTagValue("data_tablespace", getDataTablespace())); + retval.append(" ").append(XMLHandler.addTagValue("index_tablespace", getIndexTablespace())); + + // only write the tag out if it is set to true + if (isReadOnly()) { + retval.append(" ").append(XMLHandler.addTagValue("read_only", Boolean.toString(isReadOnly()))); + } + + retval.append(" ").append(Const.CR); + + List list = new ArrayList(); + Set keySet = getAttributes().keySet(); + for (Object object : keySet) { + list.add((String) object); + } + Collections.sort(list); // Sort the entry-sets to make sure we can compare XML strings: if the order is different, + // the XML is different. + + for (Iterator iter = list.iterator(); iter.hasNext(); ) { + String code = iter.next(); + String attribute = getAttributes().getProperty(code); + if (!Const.isEmpty(attribute)) { + retval.append(" " + + XMLHandler.addTagValue("code", code, false) + + XMLHandler.addTagValue("attribute", attribute, false) + "" + Const.CR); + } + } + retval.append(" ").append(Const.CR); + + retval.append(" ").append(Const.CR); + return retval.toString(); + } + + @Override + public int hashCode() { + return getName().hashCode(); // name of connection is unique! + } + + @Override + public boolean equals(Object obj) { + return obj instanceof DatabaseMeta && getName().equals(((DatabaseMeta) obj).getName()); + } + + public String getURL() throws KettleDatabaseException { + return getURL(null); + } + + public String getURL(String partitionId) throws KettleDatabaseException { + // First see if we're not doing any JNDI... + // + /* + * This doesn't make much sense here - we check but do nothing? if ( getAccessType() == TYPE_ACCESS_JNDI ) { // We + * can't really determine the URL here. // // } + */ + String baseUrl; + String hostname; + String port; + String databaseName; + + if (isPartitioned() && !Const.isEmpty(partitionId)) { + // Get the cluster information... 
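+      // For example (illustrative values): with a partition whose hostname/port/database are
+      // host2 / 3307 / sales_p2 and the default MySQL extra options added in addOptions(),
+      // the URL assembled below would look roughly like
+      //   jdbc:mysql://host2:3307/sales_p2?defaultFetchSize=500&useCursorFetch=true
+      // assuming the database interface reports "?", "&" and "=" as its option separators.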
+ PartitionDatabaseMeta partition = getPartitionMeta(partitionId); + hostname = environmentSubstitute(partition.getHostname()); + port = environmentSubstitute(partition.getPort()); + databaseName = environmentSubstitute(partition.getDatabaseName()); + } else { + hostname = environmentSubstitute(getHostname()); + port = environmentSubstitute(getDatabasePortNumberString()); + databaseName = environmentSubstitute(getDatabaseName()); + } + baseUrl = databaseInterface.getURL(environmentSubstitute(hostname), environmentSubstitute(port), + environmentSubstitute(databaseName)); + StringBuffer url = new StringBuffer(environmentSubstitute(baseUrl)); + + if (databaseInterface.supportsOptionsInURL()) { + // OK, now add all the options... + String optionIndicator = getExtraOptionIndicator(); + String optionSeparator = getExtraOptionSeparator(); + String valueSeparator = getExtraOptionValueSeparator(); + + Map map = getExtraOptions(); + if (map.size() > 0) { + Iterator iterator = map.keySet().iterator(); + boolean first = true; + while (iterator.hasNext()) { + String typedParameter = iterator.next(); + int dotIndex = typedParameter.indexOf('.'); + if (dotIndex >= 0) { + String typeCode = typedParameter.substring(0, dotIndex); + String parameter = typedParameter.substring(dotIndex + 1); + String value = map.get(typedParameter); + + // Only add to the URL if it's the same database type code... + // + if (databaseInterface.getPluginId().equals(typeCode)) { + if (first && url.indexOf(valueSeparator) == -1) { + url.append(optionIndicator); + } else { + url.append(optionSeparator); + } + + url.append(parameter); + if (!Const.isEmpty(value) && !value.equals(EMPTY_OPTIONS_STRING)) { + url.append(valueSeparator).append(value); + } + first = false; + } + } + } + } + } + // else { + // We need to put all these options in a Properties file later (Oracle & Co.) + // This happens at connect time... + // } + + return url.toString(); + } + + public Properties getConnectionProperties() { + Properties properties = new Properties(); + + Map map = getExtraOptions(); + if (map.size() > 0) { + Iterator iterator = map.keySet().iterator(); + while (iterator.hasNext()) { + String typedParameter = iterator.next(); + int dotIndex = typedParameter.indexOf('.'); + if (dotIndex >= 0) { + String typeCode = typedParameter.substring(0, dotIndex); + String parameter = typedParameter.substring(dotIndex + 1); + String value = map.get(typedParameter); + + // Only add to the URL if it's the same database type code... 
+ // + if (databaseInterface.getPluginId().equals(typeCode)) { + if (value != null && value.equals(EMPTY_OPTIONS_STRING)) { + value = ""; + } + properties.put(parameter, environmentSubstitute(Const.NVL(value, ""))); + } + } + } + } + + return properties; + } + + public String getExtraOptionIndicator() { + return databaseInterface.getExtraOptionIndicator(); + } + + /** + * @return The extra option separator in database URL for this platform (usually this is semicolon ; ) + */ + public String getExtraOptionSeparator() { + return databaseInterface.getExtraOptionSeparator(); + } + + /** + * @return The extra option value separator in database URL for this platform (usually this is the equal sign = ) + */ + public String getExtraOptionValueSeparator() { + return databaseInterface.getExtraOptionValueSeparator(); + } + + /** + * Add an extra option to the attributes list + * + * @param databaseTypeCode The database type code for which the option applies + * @param option The option to set + * @param value The value of the option + */ + public void addExtraOption(String databaseTypeCode, String option, String value) { + databaseInterface.addExtraOption(databaseTypeCode, option, value); + } + + public void applyDefaultOptions(DatabaseInterface databaseInterface) { + final Map extraOptions = getExtraOptions(); + + final Map defaultOptions = databaseInterface.getDefaultOptions(); + for (String option : defaultOptions.keySet()) { + String value = defaultOptions.get(option); + String[] split = option.split("[.]", 2); + if (!extraOptions.containsKey(option) && split.length == 2) { + addExtraOption(split[0], split[1], value); + } + } + } + + /** + * @return true if the database supports transactions + * @deprecated because the same database can support transactions or not. It all depends on the database setup. + * Therefor, we look at the database metadata DatabaseMetaData.supportsTransactions() in stead of this. + */ + @Deprecated + public boolean supportsTransactions() { + return databaseInterface.supportsTransactions(); + } + + public boolean supportsAutoinc() { + return databaseInterface.supportsAutoInc(); + } + + public boolean supportsSequences() { + return databaseInterface.supportsSequences(); + } + + public String getSQLSequenceExists(String sequenceName) { + return databaseInterface.getSQLSequenceExists(sequenceName); + } + + public boolean supportsBitmapIndex() { + return databaseInterface.supportsBitmapIndex(); + } + + public boolean supportsSetLong() { + return databaseInterface.supportsSetLong(); + } + + /** + * @return true if the database supports schemas + */ + public boolean supportsSchemas() { + return databaseInterface.supportsSchemas(); + } + + /** + * @return true if the database supports catalogs + */ + public boolean supportsCatalogs() { + return databaseInterface.supportsCatalogs(); + } + + /** + * @return true when the database engine supports empty transaction. (for example Informix does not on a non-ANSI + * database type!) + */ + public boolean supportsEmptyTransactions() { + return databaseInterface.supportsEmptyTransactions(); + } + + /** + * See if this database supports the setCharacterStream() method on a PreparedStatement. + * + * @return true if we can set a Stream on a field in a PreparedStatement. False if not. + */ + public boolean supportsSetCharacterStream() { + return databaseInterface.supportsSetCharacterStream(); + } + + /** + * Get the maximum length of a text field for this database connection. This includes optional CLOB, Memo and Text + * fields. 
(the maximum!) + * + * @return The maximum text field length for this database type. (mostly CLOB_LENGTH) + */ + public int getMaxTextFieldLength() { + return databaseInterface.getMaxTextFieldLength(); + } + + public static final int getAccessType(String dbaccess) { + int i; + + if (dbaccess == null) { + return TYPE_ACCESS_NATIVE; + } + + for (i = 0; i < dbAccessTypeCode.length; i++) { + if (dbAccessTypeCode[i].equalsIgnoreCase(dbaccess)) { + return i; + } + } + for (i = 0; i < dbAccessTypeDesc.length; i++) { + if (dbAccessTypeDesc[i].equalsIgnoreCase(dbaccess)) { + return i; + } + } + + return TYPE_ACCESS_NATIVE; + } + + public static final String getAccessTypeDesc(int dbaccess) { + if (dbaccess < 0) { + return null; + } + if (dbaccess > dbAccessTypeCode.length) { + return null; + } + + return dbAccessTypeCode[dbaccess]; + } + + public static final String getAccessTypeDescLong(int dbaccess) { + if (dbaccess < 0) { + return null; + } + if (dbaccess > dbAccessTypeDesc.length) { + return null; + } + + return dbAccessTypeDesc[dbaccess]; + } + + public static final DatabaseInterface[] getDatabaseInterfaces() { + List list = new ArrayList(getDatabaseInterfacesMap().values()); + return list.toArray(new DatabaseInterface[list.size()]); + } + + /** + * Clear the database interfaces map. The map is cached by getDatabaseInterfacesMap(), but in some instances it may + * need to be reloaded (such as adding/updating Database plugins). After calling clearDatabaseInterfacesMap(), the + * next call to getDatabaseInterfacesMap() will reload the map. + */ + public static final void clearDatabaseInterfacesMap() { + allDatabaseInterfaces = null; + } + + private static final Future> createDatabaseInterfacesMap() { + return ExecutorUtil.getExecutor().submit(new Callable>() { + private Map doCreate() { + LogChannelInterface log = LogChannel.GENERAL; + PluginRegistry registry = PluginRegistry.getInstance(); + + List plugins = registry.getPlugins(DatabasePluginType.class); + HashMap tmpAllDatabaseInterfaces = new HashMap(); + for (PluginInterface plugin : plugins) { + try { + DatabaseInterface databaseInterface = (DatabaseInterface) registry.loadClass(plugin); + databaseInterface.setPluginId(plugin.getIds()[0]); + databaseInterface.setPluginName(plugin.getName()); + tmpAllDatabaseInterfaces.put(plugin.getIds()[0], databaseInterface); + } catch (KettlePluginException cnfe) { + System.out.println("Could not create connection entry for " + + plugin.getName() + ". " + cnfe.getCause().getClass().getName()); + log.logError("Could not create connection entry for " + + plugin.getName() + ". 
" + cnfe.getCause().getClass().getName()); + if (log.isDebug()) { + log.logDebug("Debug-Error loading plugin: " + plugin, cnfe); + } + } catch (Exception e) { + log.logError("Error loading plugin: " + plugin, e); + } + } + return Collections.unmodifiableMap(tmpAllDatabaseInterfaces); + } + + @Override + public Map call() throws Exception { + return doCreate(); + } + }); + } + + public static final Map getDatabaseInterfacesMap() { + Future> allDatabaseInterfaces = DatabaseMeta.allDatabaseInterfaces; + while (allDatabaseInterfaces == null) { + DatabaseMeta.allDatabaseInterfaces = createDatabaseInterfacesMap(); + allDatabaseInterfaces = DatabaseMeta.allDatabaseInterfaces; + } + try { + return allDatabaseInterfaces.get(); + } catch (Exception e) { + clearDatabaseInterfacesMap(); + // doCreate() above doesn't declare any exceptions so anything that comes out SHOULD be a runtime exception + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else { + throw new RuntimeException(e); + } + } + } + + public static final int[] getAccessTypeList(String dbTypeDesc) { + try { + DatabaseInterface di = findDatabaseInterface(dbTypeDesc); + return di.getAccessTypeList(); + } catch (KettleDatabaseException kde) { + return null; + } + } + + public static final int getPortForDBType(String strtype, String straccess) { + try { + DatabaseInterface di = getDatabaseInterface(strtype); + di.setAccessType(getAccessType(straccess)); + return di.getDefaultDatabasePort(); + } catch (KettleDatabaseException kde) { + return -1; + } + } + + public int getDefaultDatabasePort() { + return databaseInterface.getDefaultDatabasePort(); + } + + public int getNotFoundTK(boolean use_autoinc) { + return databaseInterface.getNotFoundTK(use_autoinc); + } + + public String getDriverClass() { + return environmentSubstitute(databaseInterface.getDriverClass()); + } + + public String stripCR(String sbsql) { + if (sbsql == null) { + return null; + } + return stripCR(new StringBuffer(sbsql)); + } + + public String stripCR(StringBuffer sbsql) { + // DB2 Can't handle \n in SQL Statements... + if (!supportsNewLinesInSQL()) { + // Remove CR's + for (int i = sbsql.length() - 1; i >= 0; i--) { + if (sbsql.charAt(i) == '\n' || sbsql.charAt(i) == '\r') { + sbsql.setCharAt(i, ' '); + } + } + } + + return sbsql.toString(); + } + + public String getSeqNextvalSQL(String sequenceName) { + return databaseInterface.getSQLNextSequenceValue(sequenceName); + } + + public String getSQLCurrentSequenceValue(String sequenceName) { + return databaseInterface.getSQLCurrentSequenceValue(sequenceName); + } + + public boolean isFetchSizeSupported() { + return databaseInterface.isFetchSizeSupported(); + } + + /** + * Indicates the need to insert a placeholder (0) for auto increment fields. + * + * @return true if we need a placeholder for auto increment fields in insert statements. 
+ */ + public boolean needsPlaceHolder() { + return databaseInterface.needsPlaceHolder(); + } + + public String getFunctionSum() { + return databaseInterface.getFunctionSum(); + } + + public String getFunctionAverage() { + return databaseInterface.getFunctionAverage(); + } + + public String getFunctionMaximum() { + return databaseInterface.getFunctionMaximum(); + } + + public String getFunctionMinimum() { + return databaseInterface.getFunctionMinimum(); + } + + public String getFunctionCount() { + return databaseInterface.getFunctionCount(); + } + + /** + * Check the database connection parameters and give back an array of remarks + * + * @return an array of remarks Strings + */ + public String[] checkParameters() { + ArrayList remarks = new ArrayList(); + + if (getDatabaseInterface() == null) { + remarks.add("No database type was choosen"); + } + + if (getName() == null || getName().length() == 0) { + remarks.add("Please give this database connection a name"); + } + + if (!isPartitioned() + && !(getDatabaseInterface() instanceof SAPR3DatabaseMeta + || getDatabaseInterface() instanceof GenericDatabaseMeta)) { + if (getDatabaseName() == null || getDatabaseName().length() == 0) { + remarks.add("Please specify the name of the database"); + } + } + + return remarks.toArray(new String[remarks.size()]); + } + + /** + * This is now replaced with getQuotedSchemaTableCombination(), enforcing the use of the quoteFields call + * + * @param schemaName + * @param tableName + * @return + * @deprecated please use getQuotedSchemaTableCombination() + */ + @Deprecated + public String getSchemaTableCombination(String schemaName, String tableName) { + return getQuotedSchemaTableCombination(schemaName, tableName); + } + + /** + * Calculate the schema-table combination, usually this is the schema and table separated with a dot. (schema.table) + * + * @param schemaName the schema-name or null if no schema is used. 
+ * @param tableName the table name + * @return the schemaname-tablename combination + */ + public String getQuotedSchemaTableCombination(String schemaName, String tableName) { + if (Const.isEmpty(schemaName)) { + if (Const.isEmpty(getPreferredSchemaName())) { + return quoteField(environmentSubstitute(tableName)); // no need to look further + } else { + return databaseInterface.getSchemaTableCombination( + quoteField(environmentSubstitute(getPreferredSchemaName())), + quoteField(environmentSubstitute(tableName))); + } + } else { + return databaseInterface.getSchemaTableCombination( + quoteField(environmentSubstitute(schemaName)), quoteField(environmentSubstitute(tableName))); + } + } + + public boolean isClob(ValueMetaInterface v) { + boolean retval = true; + + if (v == null || v.getLength() < DatabaseMeta.CLOB_LENGTH) { + retval = false; + } else { + return true; + } + return retval; + } + + public String getFieldDefinition(ValueMetaInterface v, String tk, String pk, boolean use_autoinc) { + return getFieldDefinition(v, tk, pk, use_autoinc, true, true); + } + + public String getFieldDefinition(ValueMetaInterface v, String tk, String pk, boolean use_autoinc, + boolean add_fieldname, boolean add_cr) { + + String definition = + v.getDatabaseColumnTypeDefinition(databaseInterface, tk, pk, use_autoinc, add_fieldname, add_cr); + if (!Const.isEmpty(definition)) { + return definition; + } + + return databaseInterface.getFieldDefinition(v, tk, pk, use_autoinc, add_fieldname, add_cr); + } + + public String getLimitClause(int nrRows) { + return databaseInterface.getLimitClause(nrRows); + } + + /** + * @param tableName The table or schema-table combination. We expect this to be quoted properly already! + * @return the SQL for to get the fields of this table. + */ + public String getSQLQueryFields(String tableName) { + return databaseInterface.getSQLQueryFields(tableName); + } + + public String getAddColumnStatement(String tablename, ValueMetaInterface v, String tk, boolean use_autoinc, + String pk, boolean semicolon) { + String retval = databaseInterface.getAddColumnStatement(tablename, v, tk, use_autoinc, pk, semicolon); + retval += Const.CR; + if (semicolon) { + retval += ";" + Const.CR; + } + return retval; + } + + public String getDropColumnStatement(String tablename, ValueMetaInterface v, String tk, boolean use_autoinc, + String pk, boolean semicolon) { + String retval = databaseInterface.getDropColumnStatement(tablename, v, tk, use_autoinc, pk, semicolon); + retval += Const.CR; + if (semicolon) { + retval += ";" + Const.CR; + } + return retval; + } + + public String getModifyColumnStatement(String tablename, ValueMetaInterface v, String tk, boolean use_autoinc, + String pk, boolean semicolon) { + String retval = databaseInterface.getModifyColumnStatement(tablename, v, tk, use_autoinc, pk, semicolon); + retval += Const.CR; + if (semicolon) { + retval += ";" + Const.CR; + } + + return retval; + } + + /** + * @return an array of reserved words for the database type... + */ + public String[] getReservedWords() { + return databaseInterface.getReservedWords(); + } + + /** + * @return true if reserved words need to be double quoted ("password", "select", ...) + */ + public boolean quoteReservedWords() { + return databaseInterface.quoteReservedWords(); + } + + /** + * @return The start quote sequence, mostly just double quote, but sometimes [, ... 
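+ * (For instance, MySQL-style databases quote with a backtick and SQL Server-style brackets give [name],
+ * while most other types use a plain double quote; the exact sequence comes from the database interface.)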
+ */ + public String getStartQuote() { + return databaseInterface.getStartQuote(); + } + + /** + * @return The end quote sequence, mostly just double quote, but sometimes ], ... + */ + public String getEndQuote() { + return databaseInterface.getEndQuote(); + } + + /** + * Returns a quoted field if this is needed: contains spaces, is a reserved word, ... + * + * @param field The fieldname to check for quoting + * @return The quoted field (if this is needed. + */ + public String quoteField(String field) { + if (Const.isEmpty(field)) { + return null; + } + + if (isForcingIdentifiersToLowerCase()) { + field = field.toLowerCase(); + } else if (isForcingIdentifiersToUpperCase()) { + field = field.toUpperCase(); + } + + // If the field already contains quotes, we don't touch it anymore, just return the same string... + if (field.indexOf(getStartQuote()) >= 0 || field.indexOf(getEndQuote()) >= 0) { + return field; + } + + if (isReservedWord(field) && quoteReservedWords()) { + return handleCase(getStartQuote() + field + getEndQuote()); + } else { + if (databaseInterface.isQuoteAllFields() + || hasSpacesInField(field) || hasSpecialCharInField(field) || hasDotInField(field)) { + return getStartQuote() + field + getEndQuote(); + } else { + return field; + } + } + } + + private String handleCase(String field) { + if (preserveReservedCase()) { + return field; + } else { + if (databaseInterface.isDefaultingToUppercase()) { + return field.toUpperCase(); + } else { + return field.toLowerCase(); + } + } + } + + /** + * Determines whether or not this field is in need of quoting:
+ * - When the fieldname contains spaces
+ * - When the fieldname is a reserved word
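+ * For example (illustrative): "order date" or a reserved word such as "select" would need quoting,
+ * while "order_date" normally would not.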
+ * + * @param fieldname the fieldname to check if there is a need for quoting + * @return true if the fieldname needs to be quoted. + */ + public boolean isInNeedOfQuoting(String fieldname) { + return isReservedWord(fieldname) || hasSpacesInField(fieldname); + } + + /** + * Returns true if the string specified is a reserved word on this database type. + * + * @param word The word to check + * @return true if word is a reserved word on this database. + */ + public boolean isReservedWord(String word) { + String[] reserved = getReservedWords(); + if (Const.indexOfString(word, reserved) >= 0) { + return true; + } + return false; + } + + /** + * Detects if a field has spaces in the name. We need to quote the field in that case. + * + * @param fieldname The fieldname to check for spaces + * @return true if the fieldname contains spaces + */ + public boolean hasSpacesInField(String fieldname) { + if (fieldname == null) { + return false; + } + if (fieldname.indexOf(' ') >= 0) { + return true; + } + return false; + } + + /** + * Detects if a field has spaces in the name. We need to quote the field in that case. + * + * @param fieldname The fieldname to check for spaces + * @return true if the fieldname contains spaces + */ + public boolean hasSpecialCharInField(String fieldname) { + if (fieldname == null) { + return false; + } + if (fieldname.indexOf('/') >= 0) { + return true; + } + if (fieldname.indexOf('-') >= 0) { + return true; + } + if (fieldname.indexOf('+') >= 0) { + return true; + } + if (fieldname.indexOf(',') >= 0) { + return true; + } + if (fieldname.indexOf('*') >= 0) { + return true; + } + if (fieldname.indexOf('(') >= 0) { + return true; + } + if (fieldname.indexOf(')') >= 0) { + return true; + } + if (fieldname.indexOf('{') >= 0) { + return true; + } + if (fieldname.indexOf('}') >= 0) { + return true; + } + if (fieldname.indexOf('[') >= 0) { + return true; + } + if (fieldname.indexOf(']') >= 0) { + return true; + } + if (fieldname.indexOf('%') >= 0) { + return true; + } + if (fieldname.indexOf('@') >= 0) { + return true; + } + if (fieldname.indexOf('?') >= 0) { + return true; + } + return false; + } + + public boolean hasDotInField(String fieldname) { + if (fieldname == null) { + return false; + } + if (fieldname.indexOf('.') >= 0) { + return true; + } + return false; + } + + /** + * Checks the fields specified for reserved words and quotes them. + * + * @param fields the list of fields to check + * @return true if one or more values have a name that is a reserved word on this database type. + */ + public boolean replaceReservedWords(RowMetaInterface fields) { + boolean hasReservedWords = false; + for (int i = 0; i < fields.size(); i++) { + ValueMetaInterface v = fields.getValueMeta(i); + if (isReservedWord(v.getName())) { + hasReservedWords = true; + v.setName(quoteField(v.getName())); + } + } + return hasReservedWords; + } + + /** + * Checks the fields specified for reserved words + * + * @param fields the list of fields to check + * @return The nr of reserved words for this database. 
+ */ + public int getNrReservedWords(RowMetaInterface fields) { + int nrReservedWords = 0; + for (int i = 0; i < fields.size(); i++) { + ValueMetaInterface v = fields.getValueMeta(i); + if (isReservedWord(v.getName())) { + nrReservedWords++; + } + } + return nrReservedWords; + } + + /** + * @return a list of types to get the available tables + */ + public String[] getTableTypes() { + return databaseInterface.getTableTypes(); + } + + /** + * @return a list of types to get the available views + */ + public String[] getViewTypes() { + return databaseInterface.getViewTypes(); + } + + /** + * @return a list of types to get the available synonyms + */ + public String[] getSynonymTypes() { + return databaseInterface.getSynonymTypes(); + } + + /** + * @return true if we need to supply the schema-name to getTables in order to get a correct list of items. + */ + public boolean useSchemaNameForTableList() { + return databaseInterface.useSchemaNameForTableList(); + } + + /** + * @return true if the database supports views + */ + public boolean supportsViews() { + return databaseInterface.supportsViews(); + } + + /** + * @return true if the database supports synonyms + */ + public boolean supportsSynonyms() { + return databaseInterface.supportsSynonyms(); + } + + /** + * @return The SQL on this database to get a list of stored procedures. + */ + public String getSQLListOfProcedures() { + return databaseInterface.getSQLListOfProcedures(); + } + + /** + * @param tableName The tablename to be truncated + * @return The SQL statement to remove all rows from the specified statement, if possible without using transactions + */ + public String getTruncateTableStatement(String schema, String tableName) { + return databaseInterface.getTruncateTableStatement(getQuotedSchemaTableCombination(schema, tableName)); + } + + /** + * @return true if the database rounds floating point numbers to the right precision. For example if the target field + * is number(7,2) the value 12.399999999 is converted into 12.40 + */ + public boolean supportsFloatRoundingOnUpdate() { + return databaseInterface.supportsFloatRoundingOnUpdate(); + } + + /** + * @param tableNames The names of the tables to lock + * @return The SQL commands to lock database tables for write purposes. null is returned in case locking is not + * supported on the target database. + */ + public String getSQLLockTables(String[] tableNames) { + return databaseInterface.getSQLLockTables(tableNames); + } + + /** + * @param tableNames The names of the tables to unlock + * @return The SQL commands to unlock databases tables. null is returned in case locking is not supported on the + * target database. + */ + public String getSQLUnlockTables(String[] tableNames) { + return databaseInterface.getSQLUnlockTables(tableNames); + } + + /** + * @return a feature list for the chosen database type. 
+ */ + public List getFeatureSummary() { + List list = new ArrayList(); + RowMetaAndData r = null; + final String par = "Parameter"; + final String val = "Value"; + + ValueMetaInterface testValue = new ValueMetaString("FIELD"); + testValue.setLength(30); + + if (databaseInterface != null) { + // Type of database + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Database type"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getPluginId()); + list.add(r); + // Type of access + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Access type"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getAccessTypeDesc()); + list.add(r); + // Name of database + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Database name"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getDatabaseName()); + list.add(r); + // server host name + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Server hostname"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getHostname()); + list.add(r); + // Port number + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Service port"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getDatabasePortNumberString()); + list.add(r); + // Username + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Username"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getUsername()); + list.add(r); + // Informix server + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Informix server name"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getServername()); + list.add(r); + // Other properties... + Enumeration keys = getAttributes().keys(); + while (keys.hasMoreElements()) { + String key = (String) keys.nextElement(); + String value = getAttributes().getProperty(key); + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Extra attribute [" + key + "]"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, value); + list.add(r); + } + + // driver class + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Driver class"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getDriverClass()); + list.add(r); + // URL + String pwd = getPassword(); + setPassword("password"); // Don't give away the password in the URL! + String url = ""; + try { + url = getURL(); + } catch (Exception e) { + url = ""; + } // SAP etc. + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "URL"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, url); + list.add(r); + setPassword(pwd); + // SQL: Next sequence value + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "SQL: next sequence value"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getSeqNextvalSQL("SEQUENCE")); + list.add(r); + // is set fetch size supported + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supported: set fetch size"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, isFetchSizeSupported() ? "Y" : "N"); + list.add(r); + // needs place holder for auto increment + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "auto increment field needs placeholder"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, needsPlaceHolder() ? 
"Y" : "N"); + list.add(r); + // Sum function + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "SUM aggregate function"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getFunctionSum()); + list.add(r); + // Avg function + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "AVG aggregate function"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getFunctionAverage()); + list.add(r); + // Minimum function + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "MIN aggregate function"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getFunctionMinimum()); + list.add(r); + // Maximum function + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "MAX aggregate function"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getFunctionMaximum()); + list.add(r); + // Count function + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "COUNT aggregate function"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getFunctionCount()); + list.add(r); + // Schema-table combination + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Schema / Table combination"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getQuotedSchemaTableCombination("SCHEMA", "TABLE")); + list.add(r); + // Limit clause + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "LIMIT clause for 100 rows"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getLimitClause(100)); + list.add(r); + // add column statement + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Add column statement"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getAddColumnStatement( + "TABLE", testValue, null, false, null, false)); + list.add(r); + // drop column statement + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Drop column statement"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getDropColumnStatement( + "TABLE", testValue, null, false, null, false)); + list.add(r); + // Modify column statement + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Modify column statement"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getModifyColumnStatement( + "TABLE", testValue, null, false, null, false)); + list.add(r); + + // List of reserved words + String reserved = ""; + if (getReservedWords() != null) { + for (int i = 0; i < getReservedWords().length; i++) { + reserved += (i > 0 ? ", " : "") + getReservedWords()[i]; + } + } + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "List of reserved words"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, reserved); + list.add(r); + + // Quote reserved words? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Quote reserved words?"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, quoteReservedWords() ? 
"Y" : "N"); + list.add(r); + // Start Quote + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "Start quote for reserved words"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getStartQuote()); + list.add(r); + // End Quote + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "End quote for reserved words"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getEndQuote()); + list.add(r); + + // List of table types + String types = ""; + String[] slist = getTableTypes(); + if (slist != null) { + for (int i = 0; i < slist.length; i++) { + types += (i > 0 ? ", " : "") + slist[i]; + } + } + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "List of JDBC table types"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, types); + list.add(r); + + // List of view types + types = ""; + slist = getViewTypes(); + if (slist != null) { + for (int i = 0; i < slist.length; i++) { + types += (i > 0 ? ", " : "") + slist[i]; + } + } + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "List of JDBC view types"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, types); + list.add(r); + + // List of synonym types + types = ""; + slist = getSynonymTypes(); + if (slist != null) { + for (int i = 0; i < slist.length; i++) { + types += (i > 0 ? ", " : "") + slist[i]; + } + } + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "List of JDBC synonym types"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, types); + list.add(r); + + // Use schema-name to get list of tables? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "use schema name to get table list?"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, useSchemaNameForTableList() ? "Y" : "N"); + list.add(r); + // supports view? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supports views?"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, supportsViews() ? "Y" : "N"); + list.add(r); + // supports synonyms? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supports synonyms?"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, supportsSynonyms() ? "Y" : "N"); + list.add(r); + // SQL: get list of procedures? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "SQL: list of procedures"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, getSQLListOfProcedures()); + list.add(r); + // SQL: get truncate table statement? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "SQL: truncate table"); + String truncateStatement = getTruncateTableStatement(null, "TABLE"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, truncateStatement != null + ? truncateStatement : "Not supported by this database type"); + list.add(r); + // supports float rounding on update? + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supports floating point rounding on update/insert"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, supportsFloatRoundingOnUpdate() ? "Y" : "N"); + list.add(r); + // supports time stamp to date conversion + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supports timestamp-date conversion"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, supportsTimeStampToDateConversion() ? 
"Y" : "N"); + list.add(r); + // supports batch updates + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supports batch updates"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, supportsBatchUpdates() ? "Y" : "N"); + list.add(r); + // supports boolean values + r = new RowMetaAndData(); + r.addValue(par, ValueMetaInterface.TYPE_STRING, "supports boolean data type"); + r.addValue(val, ValueMetaInterface.TYPE_STRING, supportsBooleanDataType() ? "Y" : "N"); + list.add(r); + } + + return list; + } + + /** + * @return true if the database result sets support getTimeStamp() to retrieve date-time. (Date) + */ + public boolean supportsTimeStampToDateConversion() { + return databaseInterface.supportsTimeStampToDateConversion(); + } + + /** + * @return true if the database JDBC driver supports batch updates For example Interbase doesn't support this! + */ + public boolean supportsBatchUpdates() { + return databaseInterface.supportsBatchUpdates(); + } + + /** + * @return true if the database supports a boolean, bit, logical, ... datatype + */ + public boolean supportsBooleanDataType() { + return databaseInterface.supportsBooleanDataType(); + } + + /** + * @param b Set to true if the database supports a boolean, bit, logical, ... datatype + */ + public void setSupportsBooleanDataType(boolean b) { + databaseInterface.setSupportsBooleanDataType(b); + } + + /** + * @return true if the database supports the Timestamp data type (nanosecond precision and all) + */ + public boolean supportsTimestampDataType() { + return databaseInterface.supportsTimestampDataType(); + } + + /** + * @param b Set to true if the database supports the Timestamp data type (nanosecond precision and all) + */ + public void setSupportsTimestampDataType(boolean b) { + databaseInterface.setSupportsTimestampDataType(b); + } + + /** + * @return true if reserved words' case should be preserved + */ + public boolean preserveReservedCase() { + return databaseInterface.preserveReservedCase(); + } + + /** + * @return true if reserved words' case should be preserved + */ + public void setPreserveReservedCase(boolean b) { + databaseInterface.setPreserveReservedCase(b); + } + + /** + * Changes the names of the fields to their quoted equivalent if this is needed + * + * @param fields The row of fields to change + */ + public void quoteReservedWords(RowMetaInterface fields) { + for (int i = 0; i < fields.size(); i++) { + ValueMetaInterface v = fields.getValueMeta(i); + v.setName(quoteField(v.getName())); + } + } + + /** + * @return a map of all the extra URL options you want to set. + */ + public Map getExtraOptions() { + return databaseInterface.getExtraOptions(); + } + + /** + * @return true if the database supports connection options in the URL, false if they are put in a Properties object. + */ + public boolean supportsOptionsInURL() { + return databaseInterface.supportsOptionsInURL(); + } + + /** + * @return extra help text on the supported options on the selected database platform. + */ + public String getExtraOptionsHelpText() { + return databaseInterface.getExtraOptionsHelpText(); + } + + /** + * @return true if the database JDBC driver supports getBlob on the resultset. If not we must use getBytes() to get + * the data. 
+ */ + public boolean supportsGetBlob() { + return databaseInterface.supportsGetBlob(); + } + + /** + * @return The SQL to execute right after connecting + */ + public String getConnectSQL() { + return databaseInterface.getConnectSQL(); + } + + /** + * @param sql The SQL to execute right after connecting + */ + public void setConnectSQL(String sql) { + databaseInterface.setConnectSQL(sql); + } + + /** + * @return true if the database supports setting the maximum number of return rows in a resultset. + */ + public boolean supportsSetMaxRows() { + return databaseInterface.supportsSetMaxRows(); + } + + /** + * Verify the name of the database and if required, change it if it already exists in the list of databases. + * + * @param databases the databases to check against. + * @param oldname the old name of the database + * @return the new name of the database connection + */ + public String verifyAndModifyDatabaseName(List databases, String oldname) { + String name = getName(); + if (name.equalsIgnoreCase(oldname)) { + return name; // nothing to see here: move along! + } + + int nr = 2; + while (DatabaseMeta.findDatabase(databases, getName()) != null) { + setName(name + " " + nr); + setDisplayName(name + " " + nr); + nr++; + } + return getName(); + } + + /** + * @return true if we want to use a database connection pool + */ + public boolean isUsingConnectionPool() { + return databaseInterface.isUsingConnectionPool(); + } + + /** + * @param usePool true if we want to use a database connection pool + */ + public void setUsingConnectionPool(boolean usePool) { + databaseInterface.setUsingConnectionPool(usePool); + } + + /** + * @return the maximum pool size + */ + public int getMaximumPoolSize() { + return databaseInterface.getMaximumPoolSize(); + } + + /** + * @param maximumPoolSize the maximum pool size + */ + public void setMaximumPoolSize(int maximumPoolSize) { + databaseInterface.setMaximumPoolSize(maximumPoolSize); + } + + /** + * @return the initial pool size + */ + public int getInitialPoolSize() { + return databaseInterface.getInitialPoolSize(); + } + + /** + * @param initalPoolSize the initial pool size + */ + public void setInitialPoolSize(int initalPoolSize) { + databaseInterface.setInitialPoolSize(initalPoolSize); + } + + /** + * @return true if the connection contains partitioning information + */ + public boolean isPartitioned() { + return databaseInterface.isPartitioned(); + } + + /** + * @param partitioned true if the connection is set to contain partitioning information + */ + public void setPartitioned(boolean partitioned) { + databaseInterface.setPartitioned(partitioned); + } + + /** + * @return the available partition/host/databases/port combinations in the cluster + */ + public PartitionDatabaseMeta[] getPartitioningInformation() { + if (!isPartitioned()) { + return new PartitionDatabaseMeta[]{}; + } + return databaseInterface.getPartitioningInformation(); + } + + /** + * @param partitionInfo the available partition/host/databases/port combinations in the cluster + */ + public void setPartitioningInformation(PartitionDatabaseMeta[] partitionInfo) { + databaseInterface.setPartitioningInformation(partitionInfo); + } + + /** + * Finds the partition metadata for the given partition iD + * + * @param partitionId The partition ID to look for + * @return the partition database metadata or null if nothing was found. 
+ */ + public PartitionDatabaseMeta getPartitionMeta(String partitionId) { + PartitionDatabaseMeta[] partitionInfo = getPartitioningInformation(); + for (int i = 0; i < partitionInfo.length; i++) { + if (partitionInfo[i].getPartitionId().equals(partitionId)) { + return partitionInfo[i]; + } + } + return null; + } + + public Properties getConnectionPoolingProperties() { + return databaseInterface.getConnectionPoolingProperties(); + } + + public void setConnectionPoolingProperties(Properties properties) { + databaseInterface.setConnectionPoolingProperties(properties); + } + + public String getSQLTableExists(String tablename) { + return databaseInterface.getSQLTableExists(tablename); + } + + public String getSQLColumnExists(String columnname, String tablename) { + return databaseInterface.getSQLColumnExists(columnname, tablename); + } + + public boolean needsToLockAllTables() { + return databaseInterface.needsToLockAllTables(); + } + + /** + * @return true if the database is streaming results (normally this is an option just for MySQL). + */ + public boolean isStreamingResults() { + return databaseInterface.isStreamingResults(); + } + + /** + * @param useStreaming true if we want the database to stream results (normally this is an option just for MySQL). + */ + public void setStreamingResults(boolean useStreaming) { + databaseInterface.setStreamingResults(useStreaming); + } + + /** + * @return true if all fields should always be quoted in db + */ + public boolean isQuoteAllFields() { + return databaseInterface.isQuoteAllFields(); + } + + /** + * @param quoteAllFields true if all fields in DB should be quoted. + */ + public void setQuoteAllFields(boolean quoteAllFields) { + databaseInterface.setQuoteAllFields(quoteAllFields); + } + + /** + * @return true if all identifiers should be forced to lower case + */ + public boolean isForcingIdentifiersToLowerCase() { + return databaseInterface.isForcingIdentifiersToLowerCase(); + } + + /** + * @param forceLowerCase true if all identifiers should be forced to lower case + */ + public void setForcingIdentifiersToLowerCase(boolean forceLowerCase) { + databaseInterface.setForcingIdentifiersToLowerCase(forceLowerCase); + } + + /** + * @return true if all identifiers should be forced to upper case + */ + public boolean isForcingIdentifiersToUpperCase() { + return databaseInterface.isForcingIdentifiersToUpperCase(); + } + + /** + * @param forceUpperCase true if all identifiers should be forced to upper case + */ + public void setForcingIdentifiersToUpperCase(boolean forceUpperCase) { + databaseInterface.setForcingIdentifiersToUpperCase(forceUpperCase); + } + + /** + * Find a database with a certain name in an arraylist of databases. + * + * @param databases The ArrayList of databases + * @param dbname The name of the database connection + * @return The database object if one was found, null otherwise. 
+ */ + public static final DatabaseMeta findDatabase(List databases, String dbname) { + if (databases == null) { + return null; + } + + for (int i = 0; i < databases.size(); i++) { + DatabaseMeta ci = (DatabaseMeta) databases.get(i); + if (ci.getName().equalsIgnoreCase(dbname)) { + return ci; + } + } + return null; + } + + public static int indexOfName(String[] databaseNames, String name) { + if (databaseNames == null || name == null) { + return -1; + } + + for (int i = 0; i < databaseNames.length; i++) { + String databaseName = databaseNames[i]; + if (name.equalsIgnoreCase(databaseName)) { + return i; + } + } + + return -1; + } + + /** + * Find a database with a certain ID in an arraylist of databases. + * + * @param databases The ArrayList of databases + * @param id The id of the database connection + * @return The database object if one was found, null otherwise. + */ + public static final DatabaseMeta findDatabase(List databases, ObjectId id) { + if (databases == null) { + return null; + } + + for (DatabaseMeta ci : databases) { + if (ci.getObjectId() != null && ci.getObjectId().equals(id)) { + return ci; + } + } + return null; + } + + @Override + public void copyVariablesFrom(VariableSpace space) { + variables.copyVariablesFrom(space); + } + + @Override + public String environmentSubstitute(String aString) { + return variables.environmentSubstitute(aString); + } + + @Override + public String[] environmentSubstitute(String[] aString) { + return variables.environmentSubstitute(aString); + } + + @Override + public String fieldSubstitute(String aString, RowMetaInterface rowMeta, Object[] rowData) throws KettleValueException { + return variables.fieldSubstitute(aString, rowMeta, rowData); + } + + @Override + public VariableSpace getParentVariableSpace() { + return variables.getParentVariableSpace(); + } + + @Override + public void setParentVariableSpace(VariableSpace parent) { + variables.setParentVariableSpace(parent); + } + + @Override + public String getVariable(String variableName, String defaultValue) { + return variables.getVariable(variableName, defaultValue); + } + + @Override + public String getVariable(String variableName) { + return variables.getVariable(variableName); + } + + @Override + public boolean getBooleanValueOfVariable(String variableName, boolean defaultValue) { + if (!Const.isEmpty(variableName)) { + String value = environmentSubstitute(variableName); + if (!Const.isEmpty(value)) { + return ValueMetaBase.convertStringToBoolean(value); + } + } + return defaultValue; + } + + @Override + public void initializeVariablesFrom(VariableSpace parent) { + variables.initializeVariablesFrom(parent); + } + + @Override + public String[] listVariables() { + return variables.listVariables(); + } + + @Override + public void setVariable(String variableName, String variableValue) { + variables.setVariable(variableName, variableValue); + } + + @Override + public void shareVariablesWith(VariableSpace space) { + variables = space; + } + + @Override + public void injectVariables(Map prop) { + variables.injectVariables(prop); + } + + /** + * @return the SQL Server instance + */ + public String getSQLServerInstance() { + // This is also covered/persisted by JDBC option MS SQL Server / instancename / + // We want to return + // --> MSSQL.instancename + return getExtraOptions().get("MSSQL.instance"); + } + + /** + * @param instanceName the SQL Server instance + */ + public void setSQLServerInstance(String instanceName) { + // This is also covered/persisted by JDBC option MS SQL Server / 
instancename / + // We want to return set + // --> MSSQL.instancename + addExtraOption("MSSQL", "instance", instanceName); + } + + /** + * @return true if the Microsoft SQL server uses two decimals (..) to separate schema and table (default==false). + */ + public boolean isUsingDoubleDecimalAsSchemaTableSeparator() { + return databaseInterface.isUsingDoubleDecimalAsSchemaTableSeparator(); + } + + /** + * @param useDoubleDecimalSeparator true if we want the database to stream results (normally this is an option just for MySQL). + */ + public void setUsingDoubleDecimalAsSchemaTableSeparator(boolean useDoubleDecimalSeparator) { + databaseInterface.setUsingDoubleDecimalAsSchemaTableSeparator(useDoubleDecimalSeparator); + } + + /** + * @return true if this database needs a transaction to perform a query (auto-commit turned off). + */ + public boolean isRequiringTransactionsOnQueries() { + return databaseInterface.isRequiringTransactionsOnQueries(); + } + + public String testConnection() { + + StringBuffer report = new StringBuffer(); + + // If the plug-in needs to provide connection information, we ask the DatabaseInterface... + // + try { + DatabaseFactoryInterface factory = getDatabaseFactory(); + return factory.getConnectionTestReport(this); + } catch (ClassNotFoundException e) { + report + .append(BaseMessages.getString(PKG, "BaseDatabaseMeta.TestConnectionReportNotImplemented.Message")) + .append(Const.CR); + report.append(BaseMessages.getString(PKG, "DatabaseMeta.report.ConnectionError", getName()) + + e.toString() + Const.CR); + report.append(Const.getStackTracker(e) + Const.CR); + } catch (Exception e) { + report.append(BaseMessages.getString(PKG, "DatabaseMeta.report.ConnectionError", getName()) + + e.toString() + Const.CR); + report.append(Const.getStackTracker(e) + Const.CR); + } + return report.toString(); + } + + public DatabaseFactoryInterface getDatabaseFactory() throws Exception { + PluginRegistry registry = PluginRegistry.getInstance(); + PluginInterface plugin = registry.getPlugin(DatabasePluginType.class, databaseInterface.getPluginId()); + if (plugin == null) { + throw new KettleDatabaseException("database type with plugin id [" + + databaseInterface.getPluginId() + "] couldn't be found!"); + } + + ClassLoader loader = registry.getClassLoader(plugin); + + Class clazz = Class.forName(databaseInterface.getDatabaseFactoryName(), true, loader); + return (DatabaseFactoryInterface) clazz.newInstance(); + } + + public String getPreferredSchemaName() { + return databaseInterface.getPreferredSchemaName(); + } + + public void setPreferredSchemaName(String preferredSchemaName) { + databaseInterface.setPreferredSchemaName(preferredSchemaName); + } + + /** + * Not used in this case, simply return root / + */ + @Override + public RepositoryDirectoryInterface getRepositoryDirectory() { + return new RepositoryDirectory(); + } + + @Override + public void setRepositoryDirectory(RepositoryDirectoryInterface repositoryDirectory) { + throw new RuntimeException("Setting a directory on a database connection is not supported"); + } + + @Override + public RepositoryObjectType getRepositoryElementType() { + return REPOSITORY_ELEMENT_TYPE; + } + + @Override + public ObjectRevision getObjectRevision() { + return objectRevision; + } + + @Override + public void setObjectRevision(ObjectRevision objectRevision) { + this.objectRevision = objectRevision; + } + + @Override + public String getDescription() { + // NOT USED + return null; + } + + @Override + public void setDescription(String description) { + // 
NOT USED + } + + public boolean supportsSequenceNoMaxValueOption() { + return databaseInterface.supportsSequenceNoMaxValueOption(); + } + + public boolean requiresCreateTablePrimaryKeyAppend() { + return databaseInterface.requiresCreateTablePrimaryKeyAppend(); + } + + public boolean requiresCastToVariousForIsNull() { + return databaseInterface.requiresCastToVariousForIsNull(); + } + + public boolean isDisplaySizeTwiceThePrecision() { + return databaseInterface.isDisplaySizeTwiceThePrecision(); + } + + public boolean supportsPreparedStatementMetadataRetrieval() { + return databaseInterface.supportsPreparedStatementMetadataRetrieval(); + } + + public boolean isSystemTable(String tableName) { + return databaseInterface.isSystemTable(tableName); + } + + private boolean supportsNewLinesInSQL() { + return databaseInterface.supportsNewLinesInSQL(); + } + + public String getSQLListOfSchemas() { + return databaseInterface.getSQLListOfSchemas(); + } + + public int getMaxColumnsInIndex() { + return databaseInterface.getMaxColumnsInIndex(); + } + + public boolean supportsErrorHandlingOnBatchUpdates() { + return databaseInterface.supportsErrorHandlingOnBatchUpdates(); + } + + /** + * Get the SQL to insert a new empty unknown record in a dimension. + * + * @param schemaTable the schema-table name to insert into + * @param keyField The key field + * @param versionField the version field + * @return the SQL to insert the unknown record into the SCD. + */ + public String getSQLInsertAutoIncUnknownDimensionRow(String schemaTable, String keyField, String versionField) { + return databaseInterface.getSQLInsertAutoIncUnknownDimensionRow(schemaTable, keyField, versionField); + } + + /** + * @return true if this is a relational database you can explore. Return false for SAP, PALO, etc. + */ + public boolean isExplorable() { + return databaseInterface.isExplorable(); + } + + /** + * @return The SQL on this database to get a list of sequences. + */ + public String getSQLListOfSequences() { + return databaseInterface.getSQLListOfSequences(); + } + + public String quoteSQLString(String string) { + return databaseInterface.quoteSQLString(string); + } + + /** + * @see DatabaseInterface#generateColumnAlias(int, String) + */ + public String generateColumnAlias(int columnIndex, String suggestedName) { + return databaseInterface.generateColumnAlias(columnIndex, suggestedName); + } + + public boolean isMySQLVariant() { + return databaseInterface.isMySQLVariant(); + } + + public Long getNextBatchId(Database ldb, String schemaName, String tableName, String fieldName) throws KettleDatabaseException { + return databaseInterface.getNextBatchId(this, ldb, schemaName, tableName, fieldName); + } + + public Object getValueFromResultSet(ResultSet rs, ValueMetaInterface val, int i) throws KettleDatabaseException { + return databaseInterface.getValueFromResultSet(rs, val, i); + } + + /** + * Marker used to determine if the DatabaseMeta should be allowed to be modified/saved. It does NOT prevent object + * modification. + * + * @return + */ + public boolean isReadOnly() { + return readOnly; + } + + /** + * Sets the marker used to determine if the DatabaseMeta should be allowed to be modified/saved. Setting to true does + * NOT prevent object modification. 
+ * + * @return + */ + public void setReadOnly(boolean readOnly) { + this.readOnly = readOnly; + } + + public String getSequenceNoMaxValueOption() { + return databaseInterface.getSequenceNoMaxValueOption(); + } + + /** + * @return true if the database supports autoGeneratedKeys + */ + public boolean supportsAutoGeneratedKeys() { + return databaseInterface.supportsAutoGeneratedKeys(); + } + + + /** + * Customizes the ValueMetaInterface defined in the base + * + * @return String the create table statement + */ + public String getCreateTableStatement() { + return databaseInterface.getCreateTableStatement(); + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java new file mode 100644 index 0000000..f90bdb1 --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java @@ -0,0 +1,459 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.www; + +import org.pentaho.di.core.Const; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.job.Job; +import org.pentaho.di.trans.Trans; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.PrintWriter; +import java.lang.management.OperatingSystemMXBean; +import java.lang.management.RuntimeMXBean; +import java.lang.management.ThreadMXBean; +import java.net.URLEncoder; +import java.util.Collections; +import java.util.List; + +public class GetStatusServlet extends BaseHttpServlet implements CartePluginInterface { + private static Class PKG = GetStatusServlet.class; // for i18n purposes, needed by Translator2!! + + private static final long serialVersionUID = 3634806745372015720L; + + public static final String CONTEXT_PATH = "/kettle/status"; + + public GetStatusServlet() { + } + + public GetStatusServlet(TransformationMap transformationMap, JobMap jobMap) { + super(transformationMap, jobMap); + } + + /** + *
+     * Endpoint: /kettle/status
+     *
+     * Method: GET
+     *
+     * Retrieve server status. The status contains information about the server itself (OS, memory, etc.)
+     * and information about the jobs and transformations present on the server.
+     *
+     * Example Request:
+     *
+     *   GET /kettle/status/?xml=Y
+     *
+     * Parameters:
+     *
+     *   xml - Boolean flag which defines the output format: Y forces XML output to be generated,
+     *         HTML is returned otherwise. (boolean, optional)
+     *
+     * Response Body:
+     *
+     *   element:     (custom)
+     *   media types: text/xml, text/html
+     *
+     * The response is an XML or HTML document containing details about the transformations and jobs
+     * hosted on this server. If an error occurs during method invocation, the result field of the
+     * response will contain the ERROR status.
+     *
+     * Example Response (summary of the XML document, values listed in document order):
+     *
+     *   Server status:     Online
+     *   Memory free:       229093440
+     *   Memory total:      285736960
+     *   CPU cores:         4
+     *   CPU process time:  7534848300
+     *   Uptime:            68818403
+     *   Thread count:      45
+     *   Load average:      -1.0
+     *   OS name:           Windows 7
+     *   OS version:        6.1
+     *   OS architecture:   amd64
+     *
+     *   Transformation status list:
+     *     - "Row generator test", id 56c93d4e-96c1-4fae-92d9-d864b0779845, status Waiting, paused N,
+     *       first/last logging line 0/0, empty logging string
+     *     - "dummy-trans", id c56961b2-c848-49b8-abde-76c8015e29b0, status Stopped, paused N,
+     *       first/last logging line 0/0, empty logging string
+     *
+     *   Job status list:
+     *     - "dummy_job", id abd61143-8174-4f27-9037-6b22fbd3e229, status Stopped,
+     *       first/last logging line 0/0, empty logging string
+     *
+     * Status Codes:
+     *
+     *   200 - Request was processed.
+     *   500 - Internal server error occurs during request processing.
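+     * Example usage (a minimal sketch, not from the original source: it assumes a Carte server
+     * listening on localhost port 8081 with the default "cluster"/"cluster" credentials; adjust
+     * host, port and credentials to match your own setup):
+     *
+     *   curl -u cluster:cluster "http://localhost:8081/kettle/status/?xml=Y"
+     *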
+ * + */ + public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, + IOException { + if (isJettyMode() && !request.getContextPath().startsWith(CONTEXT_PATH)) { + return; + } + + if (log.isDebug()) { + logDebug(BaseMessages.getString(PKG, "GetStatusServlet.StatusRequested")); + } + response.setStatus(HttpServletResponse.SC_OK); + + boolean useXML = "Y".equalsIgnoreCase(request.getParameter("xml")); + + if (useXML) { + response.setContentType("text/xml"); + response.setCharacterEncoding(Const.XML_ENCODING); + } else { + response.setContentType("text/html;charset=UTF-8"); + } + + PrintWriter out = response.getWriter(); + + List transEntries = getTransformationMap().getTransformationObjects(); + List jobEntries = getJobMap().getJobObjects(); + + if (useXML) { + out.print(XMLHandler.getXMLHeader(Const.XML_ENCODING)); + SlaveServerStatus serverStatus = new SlaveServerStatus(); + serverStatus.setStatusDescription("Online"); + + getSystemInfo(serverStatus); + + for (CarteObjectEntry entry : transEntries) { + String name = entry.getName(); + String id = entry.getId(); + Trans trans = getTransformationMap().getTransformation(entry); + String status = trans.getStatus(); + + SlaveServerTransStatus sstatus = new SlaveServerTransStatus(name, id, status); + sstatus.setPaused(trans.isPaused()); + serverStatus.getTransStatusList().add(sstatus); + } + + for (CarteObjectEntry entry : jobEntries) { + String name = entry.getName(); + String id = entry.getId(); + Job job = getJobMap().getJob(entry); + String status = job.getStatus(); + + serverStatus.getJobStatusList().add(new SlaveServerJobStatus(name, id, status)); + } + + try { + out.println(serverStatus.getXML()); + } catch (KettleException e) { + throw new ServletException("Unable to get the server status in XML format", e); + } + } else { + out.println(""); + out.println("" + + BaseMessages.getString(PKG, "GetStatusServlet.KettleSlaveServerStatus") + ""); + out.println(""); + out.println(""); + out.println(""); + out.println("

" + BaseMessages.getString(PKG, "GetStatusServlet.TopStatus") + "

"); + + try { + out.println(""); + out.print(""); + + Collections.sort(transEntries); + + for (CarteObjectEntry entry : transEntries) { + String name = entry.getName(); + String id = entry.getId(); + Trans trans = getTransformationMap().getTransformation(entry); + String status = trans.getStatus(); + String removeText = ""; + // Finished, Stopped, Waiting : allow the user to remove the transformation + // + if (trans.isFinished() || trans.isStopped() || (!trans.isInitializing() && !trans.isRunning())) { + removeText = + " Remove "; + } + + out.print(""); + out.print(""); + out.print(""); + out.print(""); + out.print(""); + out.print(""); + out.print(""); + } + out.print("
" + + BaseMessages.getString(PKG, "GetStatusServlet.TransName") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.CarteId") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.Status") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.LastLogDate") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.Remove") + "
" + name + "" + id + "" + status + "" + + (trans.getLogDate() == null ? "-" : XMLHandler.date2string(trans.getLogDate())) + "" + removeText + "

"); + + out.println(""); + out.print(""); + + Collections.sort(jobEntries); + + for (CarteObjectEntry entry : jobEntries) { + String name = entry.getName(); + String id = entry.getId(); + Job job = getJobMap().getJob(entry); + String status = job.getStatus(); + + String removeText; + if (job.isFinished() || job.isStopped()) { + removeText = + " Remove "; + } else { + removeText = ""; + } + + out.print(""); + out.print(""); + out.print(""); + out.print(""); + out.print(""); + out.print(""); + out.print(""); + } + out.print("
" + + BaseMessages.getString(PKG, "GetStatusServlet.JobName") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.CarteId") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.Status") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.LastLogDate") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.Remove") + "
" + name + "" + id + "" + status + "" + + (job.getLogDate() == null ? "-" : XMLHandler.date2string(job.getLogDate())) + "" + removeText + "
"); + + } catch (Exception ex) { + out.println("

"); + out.println("

");
+                ex.printStackTrace(out);
+                out.println("
"); + } + + out.println("

"); + out.println("

" + + BaseMessages.getString(PKG, "GetStatusServlet.ConfigurationDetails.Title") + "

"); + out.println(""); + out.print(""); + + // The max number of log lines in the back-end + // + SlaveServerConfig serverConfig = getTransformationMap().getSlaveServerConfig(); + if (serverConfig != null) { + String maxLines = ""; + if (serverConfig.getMaxLogLines() == 0) { + maxLines = BaseMessages.getString(PKG, "GetStatusServlet.NoLimit"); + } else { + maxLines = serverConfig.getMaxLogLines() + BaseMessages.getString(PKG, "GetStatusServlet.Lines"); + } + out.print(""); + + // The max age of log lines + // + String maxAge = ""; + if (serverConfig.getMaxLogTimeoutMinutes() == 0) { + maxAge = BaseMessages.getString(PKG, "GetStatusServlet.NoLimit"); + } else { + maxAge = serverConfig.getMaxLogTimeoutMinutes() + BaseMessages.getString(PKG, "GetStatusServlet.Minutes"); + } + out.print(""); + + // The max age of stale objects + // + String maxObjAge = ""; + if (serverConfig.getObjectTimeoutMinutes() == 0) { + maxObjAge = BaseMessages.getString(PKG, "GetStatusServlet.NoLimit"); + } else { + maxObjAge = serverConfig.getObjectTimeoutMinutes() + BaseMessages.getString(PKG, "GetStatusServlet.Minutes"); + } + out.print(""); + + // The name of the specified repository + // + String repositoryName; + try { + repositoryName = serverConfig.getRepository() != null ? serverConfig.getRepository().getName() : ""; + } catch (Exception e) { + logError(BaseMessages.getString(PKG, "GetStatusServlet.Parameter.RepositoryName.UnableToConnect", + serverConfig.getRepositoryId()), e); + repositoryName = BaseMessages.getString(PKG, "GetStatusServlet.Parameter.RepositoryName.UnableToConnect", + serverConfig.getRepositoryId()); + } + out.print(""); + + out.print("
" + + BaseMessages.getString(PKG, "GetStatusServlet.Parameter.Title") + " " + + BaseMessages.getString(PKG, "GetStatusServlet.Value.Title") + "
" + + BaseMessages.getString(PKG, "GetStatusServlet.Parameter.MaxLogLines") + " " + maxLines + + "
" + + BaseMessages.getString(PKG, "GetStatusServlet.Parameter.MaxLogLinesAge") + " " + maxAge + + "
" + + BaseMessages.getString(PKG, "GetStatusServlet.Parameter.MaxObjectsAge") + " " + maxObjAge + + "
" + + BaseMessages.getString(PKG, "GetStatusServlet.Parameter.RepositoryName") + " " + + repositoryName + "
"); + + String filename = serverConfig.getFilename(); + if (filename == null) { + filename = BaseMessages.getString(PKG, "GetStatusServlet.ConfigurationDetails.UsingDefaults"); + } + out + .println("" + + BaseMessages.getString(PKG, "GetStatusServlet.ConfigurationDetails.Advice", filename) + + "
"); + } + out.println(""); + out.println(""); + } + } + + private static void getSystemInfo(SlaveServerStatus serverStatus) { + OperatingSystemMXBean operatingSystemMXBean = + java.lang.management.ManagementFactory.getOperatingSystemMXBean(); + ThreadMXBean threadMXBean = java.lang.management.ManagementFactory.getThreadMXBean(); + RuntimeMXBean runtimeMXBean = java.lang.management.ManagementFactory.getRuntimeMXBean(); + + int cores = Runtime.getRuntime().availableProcessors(); + + long freeMemory = Runtime.getRuntime().freeMemory(); + long totalMemory = Runtime.getRuntime().totalMemory(); + String osArch = operatingSystemMXBean.getArch(); + String osName = operatingSystemMXBean.getName(); + String osVersion = operatingSystemMXBean.getVersion(); + double loadAvg = operatingSystemMXBean.getSystemLoadAverage(); + + int threadCount = threadMXBean.getThreadCount(); + long allThreadsCpuTime = 0L; + + long[] threadIds = threadMXBean.getAllThreadIds(); + for (int i = 0; i < threadIds.length; i++) { + allThreadsCpuTime += threadMXBean.getThreadCpuTime(threadIds[i]); + } + + long uptime = runtimeMXBean.getUptime(); + + serverStatus.setCpuCores(cores); + serverStatus.setCpuProcessTime(allThreadsCpuTime); + serverStatus.setUptime(uptime); + serverStatus.setThreadCount(threadCount); + serverStatus.setLoadAvg(loadAvg); + serverStatus.setOsName(osName); + serverStatus.setOsVersion(osVersion); + serverStatus.setOsArchitecture(osArch); + serverStatus.setMemoryFree(freeMemory); + serverStatus.setMemoryTotal(totalMemory); + + } + + public String toString() { + return "Status Handler"; + } + + public String getService() { + return CONTEXT_PATH + " (" + toString() + ")"; + } + + public String getContextPath() { + return CONTEXT_PATH; + } + +} From dead2fc535ef3b0251c05af4a850be30d026ec08 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:54:42 +0800 Subject: [PATCH 09/15] Move original Trans and TransMeta to correct package --- .../src/main/java/org/pentaho/di/trans/{steps => }/Trans.java | 0 .../src/main/java/org/pentaho/di/trans/{steps => }/TransMeta.java | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename pentaho-kettle/src/main/java/org/pentaho/di/trans/{steps => }/Trans.java (100%) rename pentaho-kettle/src/main/java/org/pentaho/di/trans/{steps => }/TransMeta.java (100%) diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/Trans.java similarity index 100% rename from pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/Trans.java rename to pentaho-kettle/src/main/java/org/pentaho/di/trans/Trans.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/TransMeta.java similarity index 100% rename from pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/TransMeta.java rename to pentaho-kettle/src/main/java/org/pentaho/di/trans/TransMeta.java From 1a7b1f6ba0ab143ab45f8300d4f84ee7d6dc701b Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 18:58:58 +0800 Subject: [PATCH 10/15] Make sure job and transformation parameters will be passed from master to slave too --- .../src/main/java/org/pentaho/di/job/Job.java | 202 +++++++++--------- .../main/java/org/pentaho/di/job/JobMeta.java | 56 ++--- .../main/java/org/pentaho/di/trans/Trans.java | 10 + 3 files changed, 144 insertions(+), 124 deletions(-) diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java 
b/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java index f1978b1..45af58c 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/Job.java @@ -1530,28 +1530,28 @@ public void setInternalKettleVariables(VariableSpace var) { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace) + */ public void copyVariablesFrom(VariableSpace space) { variables.copyVariablesFrom(space); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String) + */ public String environmentSubstitute(String aString) { return variables.environmentSubstitute(aString); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[]) + */ public String[] environmentSubstitute(String[] aString) { return variables.environmentSubstitute(aString); } @@ -1562,47 +1562,47 @@ public String fieldSubstitute(String aString, RowMetaInterface rowMeta, Object[] } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace() + */ public VariableSpace getParentVariableSpace() { return variables.getParentVariableSpace(); } /* - * (non-Javadoc) - * - * @see - * org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(org.pentaho.di.core.variables.VariableSpace) - */ + * (non-Javadoc) + * + * @see + * org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(org.pentaho.di.core.variables.VariableSpace) + */ public void setParentVariableSpace(VariableSpace parent) { variables.setParentVariableSpace(parent); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String) + */ public String getVariable(String variableName, String defaultValue) { return variables.getVariable(variableName, defaultValue); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String) + */ public String getVariable(String variableName) { return variables.getVariable(variableName); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean) + */ public boolean getBooleanValueOfVariable(String variableName, boolean defaultValue) { if (!Const.isEmpty(variableName)) { String value = environmentSubstitute(variableName); @@ -1614,47 +1614,47 @@ public boolean getBooleanValueOfVariable(String variableName, boolean defaultVal } /* - * (non-Javadoc) - * - * @see - * 
org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(org.pentaho.di.core.variables.VariableSpace) - */ + * (non-Javadoc) + * + * @see + * org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(org.pentaho.di.core.variables.VariableSpace) + */ public void initializeVariablesFrom(VariableSpace parent) { variables.initializeVariablesFrom(parent); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#listVariables() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#listVariables() + */ public String[] listVariables() { return variables.listVariables(); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String) + */ public void setVariable(String variableName, String variableValue) { variables.setVariable(variableName, variableValue); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace) + */ public void shareVariablesWith(VariableSpace space) { variables = space; } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map) + */ public void injectVariables(Map prop) { variables.injectVariables(prop); } @@ -1726,6 +1726,16 @@ public static String sendToSlaveServer(JobMeta jobMeta, JobExecutionConfiguratio executionConfiguration.getVariables().put(var, jobMeta.getVariable(var)); } + Map jobParams = new HashMap(); + for (String key : jobMeta.listParameters()) { + String value = jobMeta.getParameterValue(key); + String defaultValue = jobMeta.getParameterDefault(key); + jobParams.put(key, + executionConfiguration.getVariables().getOrDefault(key, value == null ? defaultValue : value)); + } + + executionConfiguration.getParams().putAll(jobParams); + if (executionConfiguration.isPassingExport()) { // First export the job... 
slaveServer.getVariable("MASTER_HOST") // @@ -1811,7 +1821,7 @@ public void removeJobListener(JobListener jobListener) { /** * Remove a job entry listener from the job * - * @param jobListener the job entry listener to remove + * @param jobEntryListener the job entry listener to remove */ public void removeJobEntryListener(JobEntryListener jobEntryListener) { jobEntryListeners.remove(jobEntryListener); @@ -1856,83 +1866,83 @@ public void setFinished(boolean finished) { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, - * java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String, + * java.lang.String) + */ public void addParameterDefinition(String key, String defValue, String description) throws DuplicateParamException { namedParams.addParameterDefinition(key, defValue, description); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String) + */ public String getParameterDescription(String key) throws UnknownParamException { return namedParams.getParameterDescription(key); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String) + */ public String getParameterDefault(String key) throws UnknownParamException { return namedParams.getParameterDefault(key); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String) + */ public String getParameterValue(String key) throws UnknownParamException { return namedParams.getParameterValue(key); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#listParameters() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#listParameters() + */ public String[] listParameters() { return namedParams.listParameters(); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String) + */ public void setParameterValue(String key, String value) throws UnknownParamException { namedParams.setParameterValue(key, value); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters() + */ public void eraseParameters() { namedParams.eraseParameters(); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#clearParameters() + */ public void clearParameters() { namedParams.clearParameters(); } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#activateParameters() + */ public void activateParameters() { String[] keys = listParameters(); @@ -1959,10 
+1969,10 @@ public void activateParameters() { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams) + */ public void copyParametersFrom(NamedParams params) { namedParams.copyParametersFrom(params); } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java index b4d0d89..f3ebc75 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/JobMeta.java @@ -445,10 +445,10 @@ public void clearChanged() { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.changed.ChangedFlag#hasChanged() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.changed.ChangedFlag#hasChanged() + */ @Override public boolean hasChanged() { if (super.hasChanged()) { @@ -496,10 +496,10 @@ public boolean isDatabaseConnectionUsed(DatabaseMeta databaseMeta) { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.EngineMetaInterface#getFileType() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.EngineMetaInterface#getFileType() + */ public String getFileType() { return LastUsedFile.FILE_TYPE_JOB; } @@ -536,10 +536,10 @@ public String getDefaultExtension() { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.xml.XMLInterface#getXML() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.xml.XMLInterface#getXML() + */ public String getXML() { Props props = null; if (Props.isInitialized()) { @@ -1154,10 +1154,10 @@ public SharedObjects readSharedObjects() throws KettleException { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.EngineMetaInterface#saveSharedObjects() + */ public void saveSharedObjects() throws KettleException { try { // First load all the shared objects... 
@@ -2637,10 +2637,10 @@ public List getJobhops() { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.repository.RepositoryElementInterface#getRepositoryElementType() + */ public RepositoryObjectType getRepositoryElementType() { return REPOSITORY_ELEMENT_TYPE; } @@ -2663,19 +2663,19 @@ public List composeJobEntryInterfaceList() { } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId() + */ public String getLogChannelId() { return null; } /* - * (non-Javadoc) - * - * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() - */ + * (non-Javadoc) + * + * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType() + */ public LoggingObjectType getObjectType() { return LoggingObjectType.JOBMETA; } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/Trans.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/Trans.java index 06ce49a..9600ac6 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/Trans.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/Trans.java @@ -4116,6 +4116,16 @@ public static String sendToSlaveServer(TransMeta transMeta, TransExecutionConfig vars.put(var, transMeta.getVariable(var)); } + Map transParams = new HashMap(); + for (String key : transMeta.listParameters()) { + String value = transMeta.getParameterValue(key); + String defaultValue = transMeta.getParameterDefault(key); + transParams.put(key, + executionConfiguration.getVariables().getOrDefault(key, value == null ? defaultValue : value)); + } + + executionConfiguration.getParams().putAll(transParams); + executionConfiguration.getVariables().putAll(vars); slaveServer.injectVariables(executionConfiguration.getVariables()); From f0575f249d176c2ebd518e5cdcece4c0dcd5eecc Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 19:17:37 +0800 Subject: [PATCH 11/15] Enhance job and transformation * get remote logs incrementally * be aggressive when sending jobs and transformations to slave * hard-coded "passingExport=true" --- .../job/entries/RemoteJobEntryLogHelper.java | 95 ++++++++++ .../di/job/entries/job/JobEntryJob.java | 36 +++- .../di/job/entries/trans/JobEntryTrans.java | 31 ++- .../di/resource/ResourceDefinitionHelper.java | 179 ++++++++++++++++++ 4 files changed, 326 insertions(+), 15 deletions(-) create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/job/entries/RemoteJobEntryLogHelper.java create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/resource/ResourceDefinitionHelper.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/RemoteJobEntryLogHelper.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/RemoteJobEntryLogHelper.java new file mode 100644 index 0000000..a5d397f --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/RemoteJobEntryLogHelper.java @@ -0,0 +1,95 @@ +/*! ****************************************************************************** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ +package org.pentaho.di.job.entries; + +import org.pentaho.di.cluster.SlaveServer; +import org.pentaho.di.core.logging.LogChannelInterface; + +/** + * Utility class for streaming log entries from slave back to master. + * + * @author Zhichun Wu + */ +public final class RemoteJobEntryLogHelper { + private static final String UNKNOWN_SERVER = "unknown server"; + private static final String UNKNOW_OBJECT = "unknown object"; + + private final LogChannelInterface logger; + + private final String serverAddress; + private final String objectId; + + private int lastLogEntryNo; + + public RemoteJobEntryLogHelper(SlaveServer server, String objectId, LogChannelInterface logger) { + this.logger = logger; + + this.serverAddress = server == null || server.getName() == null ? UNKNOWN_SERVER : server.getName(); + this.objectId = objectId == null ? UNKNOW_OBJECT : objectId; + + this.lastLogEntryNo = 0; + } + + public int getLastLogEntryNo() { + return this.lastLogEntryNo; + } + + public void log(String logString, int firstEntryLineNo, int lastEntryLineNo) { + if (logger == null || logString == null) { + return; + } + + int length = logString.length(); + int lineDiff = firstEntryLineNo - lastLogEntryNo; + + if (length > 0 && lastLogEntryNo != lastEntryLineNo) { + try { + logger.logBasic(new StringBuilder() + .append("---> Replay logs L") + .append(firstEntryLineNo) + .append(" ~ L") + .append(lastEntryLineNo) + .append(" from [") + .append(objectId) + .append('@') + .append(serverAddress) + .append("]: ") + .append(length) + .append(" bytes <---").toString()); + + if (lineDiff != 0) { + logger.logError(new StringBuffer() + .append("*** Somehow we ") + .append(lineDiff > 0 ? 
"lost " : "got duplicated ") + .append(Math.abs(lineDiff)) + .append(" lines of logs from [") + .append(objectId) + .append('@') + .append(serverAddress) + .append("] ***") + .toString()); + } + + logger.logBasic(logString); + } catch (Throwable t) { + // ignore as logging failure is trivial + // t.printStackTrace(); + } + } + + lastLogEntryNo = lastEntryLineNo; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java index e26537c..f8d9390 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/job/entries/job/JobEntryJob.java @@ -44,16 +44,14 @@ import org.pentaho.di.job.Job; import org.pentaho.di.job.JobExecutionConfiguration; import org.pentaho.di.job.JobMeta; +import org.pentaho.di.job.entries.RemoteJobEntryLogHelper; import org.pentaho.di.job.entry.JobEntryBase; import org.pentaho.di.job.entry.JobEntryInterface; import org.pentaho.di.job.entry.validator.AndValidator; import org.pentaho.di.job.entry.validator.JobEntryValidatorUtils; import org.pentaho.di.repository.*; -import org.pentaho.di.resource.ResourceDefinition; -import org.pentaho.di.resource.ResourceEntry; +import org.pentaho.di.resource.*; import org.pentaho.di.resource.ResourceEntry.ResourceType; -import org.pentaho.di.resource.ResourceNamingInterface; -import org.pentaho.di.resource.ResourceReference; import org.pentaho.di.www.SlaveServerJobStatus; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -934,9 +932,18 @@ public Result execute(Result result, int nr) throws KettleException { // Now start the monitoring... // SlaveServerJobStatus jobStatus = null; + + RemoteJobEntryLogHelper logHelper = new RemoteJobEntryLogHelper( + remoteSlaveServer, carteObjectId, this.log); + while (!parentJob.isStopped() && waitingToFinish) { try { - jobStatus = remoteSlaveServer.getJobStatus(jobMeta.getName(), carteObjectId, 0); + jobStatus = remoteSlaveServer.getJobStatus(jobMeta.getName(), carteObjectId, + logHelper.getLastLogEntryNo()); + logHelper.log(jobStatus.getLoggingString(), + jobStatus.getFirstLoggingLineNr(), jobStatus.getLastLoggingLineNr()); + + if (jobStatus.getResult() != null) { // The job is finished, get the result... // @@ -951,6 +958,7 @@ public Result execute(Result result, int nr) throws KettleException { // come back on-line } + // FIXME why the hell JobEntryTrans sleeps two seconds instead of one // sleep for 1 second try { Thread.sleep(1000); @@ -1091,8 +1099,8 @@ private boolean createParentFolder(String filename) { /** * Make sure that we are not loading jobs recursively... 
* - * @param parentJobMeta the parent job metadata - * @param jobMeta the job metadata + * @param parentJob the parent job + * @param jobMeta the job metadata * @throws KettleException in case both jobs are loaded from the same source */ private void verifyRecursiveExecution(Job parentJob, JobMeta jobMeta) throws KettleException { @@ -1195,7 +1203,8 @@ public JobMeta getJobMeta(Repository rep, IMetaStore metaStore, VariableSpace sp String dirStr = realFilename.substring(0, realFilename.lastIndexOf("/")); String tmpFilename = realFilename.substring(realFilename.lastIndexOf("/") + 1); RepositoryDirectoryInterface dir = rep.findDirectory(dirStr); - jobMeta = rep.loadJob(tmpFilename, dir, null, null); + // jobMeta = rep.loadJob(tmpFilename, dir, null, null); + jobMeta = ResourceDefinitionHelper.loadJob(rep, dir, tmpFilename); } catch (KettleException ke) { // try without extension if (realFilename.endsWith(Const.STRING_JOB_DEFAULT_EXT)) { @@ -1323,6 +1332,17 @@ public String exportResources(VariableSpace space, Map params = transExecutionConfiguration.getParams(); for (String param : transMeta.listParameters()) { @@ -969,9 +969,14 @@ public Result execute(Result result, int nr) throws KettleException { // Now start the monitoring... // SlaveServerTransStatus transStatus = null; + RemoteJobEntryLogHelper logHelper = new RemoteJobEntryLogHelper( + remoteSlaveServer, carteObjectId, this.log); while (!parentJob.isStopped() && waitingToFinish) { try { - transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, 0); + transStatus = remoteSlaveServer.getTransStatus(transMeta.getName(), carteObjectId, + logHelper.getLastLogEntryNo()); + logHelper.log(transStatus.getLoggingString(), + transStatus.getFirstLoggingLineNr(), transStatus.getLastLoggingLineNr()); if (!transStatus.isRunning()) { // The transformation is finished, get the result... // @@ -1187,7 +1192,8 @@ public TransMeta getTransMeta(Repository rep, IMetaStore metaStore, VariableSpac String dirStr = realFilename.substring(0, realFilename.lastIndexOf("/")); String tmpFilename = realFilename.substring(realFilename.lastIndexOf("/") + 1); RepositoryDirectoryInterface dir = rep.findDirectory(dirStr); - transMeta = rep.loadTransformation(tmpFilename, dir, null, true, null); + // transMeta = rep.loadTransformation(tmpFilename, dir, null, true, null); + transMeta = ResourceDefinitionHelper.loadTransformation(rep, dir, tmpFilename); } catch (KettleException ke) { // try without extension if (realFilename.endsWith(Const.STRING_TRANS_DEFAULT_EXT)) { @@ -1347,7 +1353,7 @@ public List getResourceDependencies(JobMeta jobMeta) { /** * We're going to load the transformation meta data referenced here. Then we're going to give it a new filename, * modify that filename in this entries. The parent caller will have made a copy of it, so it should be OK to do so. - *

+ *

* Exports the object to a flat-file system, adding content with filename keys to a set of definitions. The supplied * resource naming interface allows the object to name appropriately without worrying about those parts of the * implementation specific details. @@ -1372,6 +1378,17 @@ public String exportResources(VariableSpace space, Map PKG = JobMeta.class; // for i18n purposes, needed by Translator2!! + + private final static String VARIABLE_PREFIX = "${"; + private final static String VARIABLE_SUFFIX = "}"; + + public static class TransMetaCollection extends TransMeta { + private final List attachedMeta = new ArrayList<>(); + + public void attachTransMeta(TransMeta transMeta) { + attachedMeta.add(transMeta); + } + + public List getAttachedMeta() { + return attachedMeta; + } + } + + public static class JobMetaCollection extends JobMeta { + private final List attachedMeta = new ArrayList<>(); + + public void attachJobMeta(JobMeta jobMeta) { + attachedMeta.add(jobMeta); + } + + public List getAttachedMeta() { + return attachedMeta; + } + } + + public static boolean containsVariable(String name) { + boolean hasVar = false; + + if (name != null) { + int index = name.indexOf(VARIABLE_PREFIX); + // variable name should at least contain one character + index = index >= 0 ? name.indexOf(VARIABLE_SUFFIX, index + VARIABLE_PREFIX.length() + 1) : -1; + + hasVar = index > 0; + } + + return hasVar; + } + + public static boolean containsResource(Map definitions, + VariableSpace space, + ResourceNamingInterface namingInterface, + AbstractMeta meta) throws KettleException { + if (definitions == null || space == null || namingInterface == null || meta == null) { + return false; + } + + // String baseName; + // String originalPath; + String extension = meta instanceof TransMeta ? Const.STRING_TRANS_DEFAULT_EXT : Const.STRING_JOB_DEFAULT_EXT; + String fullname; + try { + RepositoryDirectoryInterface directory = meta.getRepositoryDirectory(); + if (Const.isEmpty(meta.getFilename())) { + // Assume repository... + // + // originalPath = directory.getPath(); + // baseName = meta.getName(); + fullname = + directory.getPath() + (directory.getPath().endsWith(RepositoryDirectory.DIRECTORY_SEPARATOR) ? "" + : RepositoryDirectory.DIRECTORY_SEPARATOR) + meta.getName() + "." 
+ extension; // + } else { + // Assume file + // + FileObject fileObject = KettleVFS.getFileObject(space.environmentSubstitute(meta.getFilename()), space); + // originalPath = fileObject.getParent().getName().getPath(); + // baseName = fileObject.getName().getBaseName(); + fullname = fileObject.getName().getPath(); + } + // } catch (FileSystemException e) { + // throw new KettleException( + // BaseMessages.getString(PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", meta.getFilename()), e); + } catch (KettleFileException e) { + throw new KettleException( + BaseMessages.getString(PKG, "JobMeta.Exception.AnErrorOccuredReadingJob", meta.getFilename()), e); + } + + // String resourceName = namingInterface.nameResource( + // baseName, originalPath, extension, ResourceNamingInterface.FileNamingType.JOB); + // logger.logBasic("=====> Checking [" + fullname + "] in " + definitions + " result=" + definitions.containsKey(fullname)); + return definitions.containsKey(fullname) || meta.equals(space); + } + + public static TransMeta loadTransformation( + Repository rep, RepositoryDirectoryInterface dir, String realFileName) throws KettleException { + TransMeta transMeta = null; + if (rep == null || dir == null || realFileName == null) { + return transMeta; + } + + if (containsVariable(realFileName)) { + TransMetaCollection tmc = new TransMetaCollection(); + transMeta = tmc; + transMeta.setFilename(realFileName); + for (RepositoryElementMetaInterface element : dir.getRepositoryObjects()) { + if (element.getObjectType() != RepositoryObjectType.TRANSFORMATION) { + continue; + } + + tmc.attachTransMeta(rep.loadTransformation(element.getName(), dir, null, true, null)); + } + } else { + transMeta = rep.loadTransformation(realFileName, dir, null, true, null); + } + + return transMeta; + } + + public static JobMeta loadJob( + Repository rep, RepositoryDirectoryInterface dir, String realFileName) throws KettleException { + JobMeta jobMeta = null; + if (rep == null || dir == null || realFileName == null) { + return jobMeta; + } + + if (containsVariable(realFileName)) { + JobMetaCollection jmc = new JobMetaCollection(); + jobMeta = jmc; + jobMeta.setFilename(realFileName); + for (RepositoryElementMetaInterface element : dir.getRepositoryObjects()) { + if (element.getObjectType() != RepositoryObjectType.JOB) { + continue; + } + + jmc.attachJobMeta(rep.loadJob(element.getName(), dir, null, null)); + } + } else { + jobMeta = rep.loadJob(realFileName, dir, null, null); + } + + return jobMeta; + } + + private ResourceDefinitionHelper() { + } +} From b291f29e38858bc2afbff39f4b53d4a2df8b45ad Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 19:21:21 +0800 Subject: [PATCH 12/15] Fix missing values in exported transformation --- .../di/trans/steps/StreamingSteps.java | 61 +++++++++++++++++++ .../di/trans/steps/append/AppendMeta.java | 17 ++++-- .../steps/filterrows/FilterRowsMeta.java | 19 ++++-- .../steps/javafilter/JavaFilterMeta.java | 19 ++++-- .../trans/steps/mergejoin/MergeJoinMeta.java | 19 ++++-- .../trans/steps/mergerows/MergeRowsMeta.java | 19 ++++-- .../steps/tableinput/TableInputMeta.java | 11 +++- 7 files changed, 141 insertions(+), 24 deletions(-) create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/StreamingSteps.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/StreamingSteps.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/StreamingSteps.java new file mode 100644 index 0000000..27f13c1 --- /dev/null +++ 
b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/StreamingSteps.java @@ -0,0 +1,61 @@ +/*! ****************************************************************************** + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ +package org.pentaho.di.trans.steps; + +import org.pentaho.di.trans.step.StepIOMetaInterface; +import org.pentaho.di.trans.step.StepMetaInterface; +import org.pentaho.di.trans.step.errorhandling.StreamInterface; + +import java.util.ArrayList; +import java.util.List; + +/** + * This class represents names of input / output steps. + * + * @author Zhichun Wu + */ +public final class StreamingSteps { + private final String[] stepNames; + + public StreamingSteps(StepMetaInterface stepMeta) { + this(stepMeta, StreamInterface.StreamType.INFO); + } + + public StreamingSteps(StepMetaInterface stepMeta, StreamInterface.StreamType streamType) { + StepIOMetaInterface stepIOMeta = stepMeta == null ? null : stepMeta.getStepIOMeta(); + List streams = stepIOMeta == null + ? null : (streamType == StreamInterface.StreamType.OUTPUT + ? stepIOMeta.getTargetStreams() : stepIOMeta.getInfoStreams()); + + if (streams == null) { + streams = new ArrayList(0); + } + + stepNames = new String[streams.size()]; + for (int i = 0; i < stepNames.length; i++) { + String name = (String) streams.get(i).getSubject(); + stepNames[i] = name == null ? streams.get(i).getStepname() : name; + } + } + + public String getStepName() { + return getStepName(0); + } + + public String getStepName(int index) { + return (index < 0 || index >= stepNames.length) ? null : stepNames[index]; + } +} diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java index c6cc0d7..80efc14 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/append/AppendMeta.java @@ -42,6 +42,7 @@ import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.di.trans.steps.StreamingSteps; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -54,6 +55,8 @@ public class AppendMeta extends BaseStepMeta implements StepMetaInterface { private static Class PKG = Append.class; // for i18n purposes, needed by Translator2!! 
+ private StreamingSteps inputSteps; + public AppendMeta() { super(); // allocate BaseStepMeta } @@ -72,8 +75,10 @@ public String getXML() { StringBuilder retval = new StringBuilder(); List infoStreams = getStepIOMeta().getInfoStreams(); - retval.append(XMLHandler.addTagValue("head_name", infoStreams.get(0).getStepname())); - retval.append(XMLHandler.addTagValue("tail_name", infoStreams.get(1).getStepname())); + retval.append(XMLHandler.addTagValue("head_name", + inputSteps == null ? infoStreams.get(0).getStepname() : inputSteps.getStepName())); + retval.append(XMLHandler.addTagValue("tail_name", + inputSteps == null ? infoStreams.get(1).getStepname() : inputSteps.getStepName(1))); return retval.toString(); } @@ -85,6 +90,7 @@ private void readData(Node stepnode) throws KettleXMLException { StreamInterface tailStream = infoStreams.get(1); headStream.setSubject(XMLHandler.getTagValue(stepnode, "head_name")); tailStream.setSubject(XMLHandler.getTagValue(stepnode, "tail_name")); + inputSteps = new StreamingSteps(this); } catch (Exception e) { throw new KettleXMLException(BaseMessages.getString(PKG, "AppendMeta.Exception.UnableToLoadStepInfo"), e); } @@ -100,6 +106,7 @@ public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List StreamInterface tailStream = infoStreams.get(1); headStream.setSubject(rep.getStepAttributeString(id_step, "head_name")); tailStream.setSubject(rep.getStepAttributeString(id_step, "tail_name")); + inputSteps = new StreamingSteps(this); } catch (Exception e) { throw new KettleException(BaseMessages.getString( PKG, "AppendMeta.Exception.UnexpectedErrorReadingStepInfo"), e); @@ -111,8 +118,10 @@ public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transforma List infoStreams = getStepIOMeta().getInfoStreams(); StreamInterface headStream = infoStreams.get(0); StreamInterface tailStream = infoStreams.get(1); - rep.saveStepAttribute(id_transformation, id_step, "head_name", headStream.getStepname()); - rep.saveStepAttribute(id_transformation, id_step, "tail_name", tailStream.getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "head_name", + inputSteps == null ? headStream.getStepname() : inputSteps.getStepName()); + rep.saveStepAttribute(id_transformation, id_step, "tail_name", + inputSteps == null ? 
tailStream.getStepname() : inputSteps.getStepName(1)); } catch (Exception e) { throw new KettleException(BaseMessages.getString(PKG, "AppendMeta.Exception.UnableToSaveStepInfo") + id_step, e); diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java index 509e401..a7519f9 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/filterrows/FilterRowsMeta.java @@ -45,6 +45,7 @@ import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.di.trans.steps.StreamingSteps; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -66,6 +67,8 @@ public class FilterRowsMeta extends BaseStepMeta implements StepMetaInterface { */ private Condition condition; + private StreamingSteps outputSteps; + public FilterRowsMeta() { super(); // allocate BaseStepMeta condition = new Condition(); @@ -109,8 +112,10 @@ public String getXML() throws KettleException { StringBuffer retval = new StringBuffer(200); List targetStreams = getStepIOMeta().getTargetStreams(); - retval.append(XMLHandler.addTagValue("send_true_to", targetStreams.get(0).getStepname())); - retval.append(XMLHandler.addTagValue("send_false_to", targetStreams.get(1).getStepname())); + retval.append(XMLHandler.addTagValue("send_true_to", + outputSteps == null ? targetStreams.get(0).getStepname() : outputSteps.getStepName())); + retval.append(XMLHandler.addTagValue("send_false_to", + outputSteps == null ? targetStreams.get(1).getStepname() : outputSteps.getStepName(1))); retval.append(" ").append(Const.CR); if (condition != null) { @@ -125,9 +130,9 @@ public String getXML() throws KettleException { private void readData(Node stepnode) throws KettleXMLException { try { List targetStreams = getStepIOMeta().getTargetStreams(); - targetStreams.get(0).setSubject(XMLHandler.getTagValue(stepnode, "send_true_to")); targetStreams.get(1).setSubject(XMLHandler.getTagValue(stepnode, "send_false_to")); + outputSteps = new StreamingSteps(this, StreamType.OUTPUT); Node compare = XMLHandler.getSubNode(stepnode, "compare"); Node condnode = XMLHandler.getSubNode(compare, "condition"); @@ -196,6 +201,8 @@ public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List targetStreams.get(0).setSubject(rep.getStepAttributeString(id_step, "send_true_to")); targetStreams.get(1).setSubject(rep.getStepAttributeString(id_step, "send_false_to")); + outputSteps = new StreamingSteps(this, StreamType.OUTPUT); + condition = rep.loadConditionFromStepAttribute(id_step, "id_condition"); } catch (Exception e) { @@ -217,8 +224,10 @@ public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transforma List targetStreams = getStepIOMeta().getTargetStreams(); rep.saveConditionStepAttribute(id_transformation, id_step, "id_condition", condition); - rep.saveStepAttribute(id_transformation, id_step, "send_true_to", targetStreams.get(0).getStepname()); - rep.saveStepAttribute(id_transformation, id_step, "send_false_to", targetStreams.get(1).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "send_true_to", + outputSteps == null ? 
targetStreams.get(0).getStepname() : outputSteps.getStepName()); + rep.saveStepAttribute(id_transformation, id_step, "send_false_to", + outputSteps == null ? targetStreams.get(1).getStepname() : outputSteps.getStepName(1)); } } catch (Exception e) { throw new KettleException(BaseMessages.getString( diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java index 9e0f35e..032ec0e 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/javafilter/JavaFilterMeta.java @@ -41,6 +41,7 @@ import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.di.trans.steps.StreamingSteps; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -59,6 +60,8 @@ public class JavaFilterMeta extends BaseStepMeta implements StepMetaInterface { */ private String condition; + private StreamingSteps outputSteps; + public JavaFilterMeta() { super(); // allocate BaseStepMeta } @@ -80,6 +83,8 @@ public void loadXML(Node stepnode, List databases, IMetaStore meta targetStreams.get(0).setSubject(XMLHandler.getTagValue(stepnode, "send_true_to")); targetStreams.get(1).setSubject(XMLHandler.getTagValue(stepnode, "send_false_to")); + outputSteps = new StreamingSteps(this, StreamType.OUTPUT); + condition = XMLHandler.getTagValue(stepnode, "condition"); } @@ -87,8 +92,10 @@ public String getXML() { StringBuffer retval = new StringBuffer(); List targetStreams = getStepIOMeta().getTargetStreams(); - retval.append(XMLHandler.addTagValue("send_true_to", targetStreams.get(0).getStepname())); - retval.append(XMLHandler.addTagValue("send_false_to", targetStreams.get(1).getStepname())); + retval.append(XMLHandler.addTagValue("send_true_to", + outputSteps == null ? targetStreams.get(0).getStepname() : outputSteps.getStepName())); + retval.append(XMLHandler.addTagValue("send_false_to", + outputSteps == null ? targetStreams.get(1).getStepname() : outputSteps.getStepName(1))); retval.append(XMLHandler.addTagValue("condition", condition)); @@ -119,6 +126,8 @@ public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List targetStreams.get(0).setSubject(rep.getStepAttributeString(id_step, "send_true_to")); targetStreams.get(1).setSubject(rep.getStepAttributeString(id_step, "send_false_to")); + outputSteps = new StreamingSteps(this, StreamType.OUTPUT); + condition = rep.getStepAttributeString(id_step, "condition"); } @@ -132,8 +141,10 @@ public void searchInfoAndTargetSteps(List steps) { public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transformation, ObjectId id_step) throws KettleException { List targetStreams = getStepIOMeta().getTargetStreams(); - rep.saveStepAttribute(id_transformation, id_step, "send_true_to", targetStreams.get(0).getStepname()); - rep.saveStepAttribute(id_transformation, id_step, "send_false_to", targetStreams.get(1).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "send_true_to", + outputSteps == null ? targetStreams.get(0).getStepname() : outputSteps.getStepName()); + rep.saveStepAttribute(id_transformation, id_step, "send_false_to", + outputSteps == null ? 
targetStreams.get(1).getStepname() : outputSteps.getStepName(1)); rep.saveStepAttribute(id_transformation, id_step, "condition", condition); } diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java index de6acf1..fe9611a 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergejoin/MergeJoinMeta.java @@ -44,6 +44,7 @@ import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.di.trans.steps.StreamingSteps; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -66,6 +67,8 @@ public class MergeJoinMeta extends BaseStepMeta implements StepMetaInterface { private String[] keyFields1; private String[] keyFields2; + private StreamingSteps inputSteps; + /** * The supported join types are INNER, LEFT OUTER, RIGHT OUTER and FULL OUTER * @@ -146,8 +149,10 @@ public String getXML() { List infoStreams = getStepIOMeta().getInfoStreams(); retval.append(XMLHandler.addTagValue("join_type", getJoinType())); - retval.append(XMLHandler.addTagValue("step1", infoStreams.get(0).getStepname())); - retval.append(XMLHandler.addTagValue("step2", infoStreams.get(1).getStepname())); + retval.append(XMLHandler.addTagValue("step1", + inputSteps == null ? infoStreams.get(0).getStepname() : inputSteps.getStepName())); + retval.append(XMLHandler.addTagValue("step2", + inputSteps == null ? infoStreams.get(1).getStepname() : inputSteps.getStepName(1))); retval.append(" " + Const.CR); for (int i = 0; i < keyFields1.length; i++) { @@ -188,6 +193,8 @@ private void readData(Node stepnode) throws KettleXMLException { List infoStreams = getStepIOMeta().getInfoStreams(); infoStreams.get(0).setSubject(XMLHandler.getTagValue(stepnode, "step1")); infoStreams.get(1).setSubject(XMLHandler.getTagValue(stepnode, "step2")); + inputSteps = new StreamingSteps(this); + joinType = XMLHandler.getTagValue(stepnode, "join_type"); } catch (Exception e) { throw new KettleXMLException( @@ -217,6 +224,8 @@ public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List List infoStreams = getStepIOMeta().getInfoStreams(); infoStreams.get(0).setSubject(rep.getStepAttributeString(id_step, "step1")); infoStreams.get(1).setSubject(rep.getStepAttributeString(id_step, "step2")); + inputSteps = new StreamingSteps(this); + joinType = rep.getStepAttributeString(id_step, "join_type"); } catch (Exception e) { throw new KettleException(BaseMessages.getString( @@ -243,8 +252,10 @@ public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transforma List infoStreams = getStepIOMeta().getInfoStreams(); - rep.saveStepAttribute(id_transformation, id_step, "step1", infoStreams.get(0).getStepname()); - rep.saveStepAttribute(id_transformation, id_step, "step2", infoStreams.get(1).getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "step1", + inputSteps == null ? infoStreams.get(0).getStepname() : inputSteps.getStepName()); + rep.saveStepAttribute(id_transformation, id_step, "step2", + inputSteps == null ? 
infoStreams.get(1).getStepname() : inputSteps.getStepName(1)); rep.saveStepAttribute(id_transformation, id_step, "join_type", getJoinType()); } catch (Exception e) { throw new KettleException(BaseMessages.getString(PKG, "MergeJoinMeta.Exception.UnableToSaveStepInfo") diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java index 2d5dc5e..c7f80f5 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/mergerows/MergeRowsMeta.java @@ -45,6 +45,7 @@ import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.di.trans.steps.StreamingSteps; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -63,6 +64,8 @@ public class MergeRowsMeta extends BaseStepMeta implements StepMetaInterface { private String[] keyFields; private String[] valueFields; + private StreamingSteps inputSteps; + /** * @return Returns the keyFields. */ @@ -146,8 +149,10 @@ public String getXML() { retval.append(XMLHandler.addTagValue("flag_field", flagField)); List infoStreams = getStepIOMeta().getInfoStreams(); - retval.append(XMLHandler.addTagValue("reference", infoStreams.get(0).getStepname())); - retval.append(XMLHandler.addTagValue("compare", infoStreams.get(1).getStepname())); + retval.append(XMLHandler.addTagValue("reference", + inputSteps == null ? infoStreams.get(0).getStepname() : inputSteps.getStepName())); + retval.append(XMLHandler.addTagValue("compare", + inputSteps == null ? infoStreams.get(1).getStepname() : inputSteps.getStepName(1))); retval.append(" " + Const.CR); retval.append(" " + Const.CR); @@ -184,6 +189,7 @@ private void readData(Node stepnode) throws KettleXMLException { compareStream.setSubject(XMLHandler.getTagValue(stepnode, "compare")); referenceStream.setSubject(XMLHandler.getTagValue(stepnode, "reference")); + inputSteps = new StreamingSteps(this); } catch (Exception e) { throw new KettleXMLException( BaseMessages.getString(PKG, "MergeRowsMeta.Exception.UnableToLoadStepInfo"), e); @@ -217,6 +223,7 @@ public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List referenceStream.setSubject(rep.getStepAttributeString(id_step, "reference")); compareStream.setSubject(rep.getStepAttributeString(id_step, "compare")); + inputSteps = new StreamingSteps(this); } catch (Exception e) { throw new KettleException(BaseMessages.getString( PKG, "MergeRowsMeta.Exception.UnexpectedErrorReadingStepInfo"), e); @@ -246,8 +253,10 @@ public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transforma StreamInterface referenceStream = infoStreams.get(0); StreamInterface compareStream = infoStreams.get(1); - rep.saveStepAttribute(id_transformation, id_step, "reference", referenceStream.getStepname()); - rep.saveStepAttribute(id_transformation, id_step, "compare", compareStream.getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "reference", + inputSteps == null ? referenceStream.getStepname() : inputSteps.getStepName()); + rep.saveStepAttribute(id_transformation, id_step, "compare", + inputSteps == null ? 
compareStream.getStepname() : inputSteps.getStepName(1)); } catch (Exception e) { throw new KettleException(BaseMessages.getString(PKG, "MergeRowsMeta.Exception.UnableToSaveStepInfo") + id_step, e); @@ -265,7 +274,7 @@ public String[] getTargetSteps() { public void getFields(RowMetaInterface r, String name, RowMetaInterface[] info, StepMeta nextStep, VariableSpace space, Repository repository, IMetaStore metaStore) throws KettleStepException { // We don't have any input fields here in "r" as they are all info fields. - // So we just merge in the info fields. + // So we just update in the info fields. // if (info != null) { boolean found = false; diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java index 34e6bcb..d05cc20 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/trans/steps/tableinput/TableInputMeta.java @@ -47,6 +47,7 @@ import org.pentaho.di.trans.step.errorhandling.StreamIcon; import org.pentaho.di.trans.step.errorhandling.StreamInterface; import org.pentaho.di.trans.step.errorhandling.StreamInterface.StreamType; +import org.pentaho.di.trans.steps.StreamingSteps; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; @@ -72,6 +73,8 @@ public class TableInputMeta extends BaseStepMeta implements StepMetaInterface { private boolean lazyConversionActive; + private StreamingSteps inputSteps; + public TableInputMeta() { super(); } @@ -150,6 +153,7 @@ private void readData(Node stepnode, List datab String lookupFromStepname = XMLHandler.getTagValue(stepnode, "lookup"); StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); infoStream.setSubject(lookupFromStepname); + inputSteps = new StreamingSteps(this); executeEachInputRow = "Y".equals(XMLHandler.getTagValue(stepnode, "execute_each_row")); variableReplacementActive = "Y".equals(XMLHandler.getTagValue(stepnode, "variables_active")); @@ -260,7 +264,8 @@ public String getXML() { retval.append(" " + XMLHandler.addTagValue("sql", sql)); retval.append(" " + XMLHandler.addTagValue("limit", rowLimit)); StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); - retval.append(" " + XMLHandler.addTagValue("lookup", infoStream.getStepname())); + retval.append(" " + XMLHandler.addTagValue("lookup", + inputSteps == null ? 
infoStream.getStepname() : inputSteps.getStepName())); retval.append(" " + XMLHandler.addTagValue("execute_each_row", executeEachInputRow)); retval.append(" " + XMLHandler.addTagValue("variables_active", variableReplacementActive)); retval.append(" " + XMLHandler.addTagValue("lazy_conversion_active", lazyConversionActive)); @@ -281,6 +286,7 @@ public void readRep(Repository rep, IMetaStore metaStore, ObjectId id_step, List String lookupFromStepname = rep.getStepAttributeString(id_step, "lookup"); StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); infoStream.setSubject(lookupFromStepname); + inputSteps = new StreamingSteps(this); executeEachInputRow = rep.getStepAttributeBoolean(id_step, "execute_each_row"); variableReplacementActive = rep.getStepAttributeBoolean(id_step, "variables_active"); @@ -296,7 +302,8 @@ public void saveRep(Repository rep, IMetaStore metaStore, ObjectId id_transforma rep.saveStepAttribute(id_transformation, id_step, "sql", sql); rep.saveStepAttribute(id_transformation, id_step, "limit", rowLimit); StreamInterface infoStream = getStepIOMeta().getInfoStreams().get(0); - rep.saveStepAttribute(id_transformation, id_step, "lookup", infoStream.getStepname()); + rep.saveStepAttribute(id_transformation, id_step, "lookup", + inputSteps == null ? infoStream.getStepname() : inputSteps.getStepName()); rep.saveStepAttribute(id_transformation, id_step, "execute_each_row", executeEachInputRow); rep.saveStepAttribute(id_transformation, id_step, "variables_active", variableReplacementActive); rep.saveStepAttribute(id_transformation, id_step, "lazy_conversion_active", lazyConversionActive); From 2cfc2ae19abefaccd6ef9c16d974bb8b399246ce Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 19:31:12 +0800 Subject: [PATCH 13/15] Improve /kettle/status HTML output * sort jobs and transformations by start date * show real name of wrapped job / transformation --- .../org/pentaho/di/www/GetStatusServlet.java | 55 +++++++++++++++++-- 1 file changed, 50 insertions(+), 5 deletions(-) diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java b/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java index f90bdb1..a97a1c9 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/www/GetStatusServlet.java @@ -39,6 +39,8 @@ import java.lang.management.ThreadMXBean; import java.net.URLEncoder; import java.util.Collections; +import java.util.Comparator; +import java.util.Date; import java.util.List; public class GetStatusServlet extends BaseHttpServlet implements CartePluginInterface { @@ -48,6 +50,9 @@ public class GetStatusServlet extends BaseHttpServlet implements CartePluginInte public static final String CONTEXT_PATH = "/kettle/status"; + // this is helpful when you implemented a job or transformation as wrapper for others + public static final String JOB_NAME_PARAM = System.getProperty("KETTLE_JOB_NAME_PARAM", "ETL_SCRIPT"); + public GetStatusServlet() { } @@ -244,6 +249,47 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro out.println(""); out.println("

" + BaseMessages.getString(PKG, "GetStatusServlet.TopStatus") + "

"); + // It happens even with JDK 8. It's either something similar to the bug below + // http://bugs.java.com/bugdatabase/view_bug.do?bug_id=7075600 + // or caused by java.util.Date comparison. + // Instead of set java.util.Arrays.useLegacyMergeSort system property to true, + // let's simple ignore what happened... + try { + Collections.sort(transEntries, new Comparator() { + @Override + public int compare(CarteObjectEntry o1, CarteObjectEntry o2) { + Trans t1 = getTransformationMap().getTransformation(o1); + Trans t2 = getTransformationMap().getTransformation(o2); + + Date d1 = t1 == null ? null : t1.getLogDate(); + Date d2 = t2 == null ? null : t2.getLogDate(); + int cmpName = d1 == null || d2 == null + ? o1.getName().compareTo(o2.getName()) : d2.compareTo(d1); + return cmpName != 0 ? cmpName : o1.getId().compareTo(o2.getId()); + } + }); + } catch (Exception e) { + // fine, let's use the original list then + } + + try { + Collections.sort(jobEntries, new Comparator() { + @Override + public int compare(CarteObjectEntry o1, CarteObjectEntry o2) { + Job j1 = getJobMap().getJob(o1); + Job j2 = getJobMap().getJob(o2); + + Date d1 = j1 == null ? null : j1.getLogDate(); + Date d2 = j2 == null ? null : j2.getLogDate(); + int cmpName = d1 == null || d2 == null + ? o1.getName().compareTo(o2.getName()) : d2.compareTo(d1); + return cmpName != 0 ? cmpName : o1.getId().compareTo(o2.getId()); + } + }); + } catch (Exception e) { + // fine, let's use the original list then + } + try { out.println(""); out.print(""); - Collections.sort(transEntries); - for (CarteObjectEntry entry : transEntries) { String name = entry.getName(); String id = entry.getId(); @@ -291,12 +335,13 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro + BaseMessages.getString(PKG, "GetStatusServlet.LastLogDate") + ""); - Collections.sort(jobEntries); - for (CarteObjectEntry entry : jobEntries) { String name = entry.getName(); String id = entry.getId(); Job job = getJobMap().getJob(entry); + String realName = job.getParameterValue(JOB_NAME_PARAM); + realName = realName == null + ? name : new StringBuilder(name).append('(').append(realName.trim()).append(')').toString(); String status = job.getStatus(); String removeText; @@ -312,7 +357,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro out.print(""); out.print(""); + + URLEncoder.encode(name, "UTF-8") + "&id=" + id + "\">" + realName + ""); out.print(""); out.print(""); out.print("
" @@ -253,8 +299,6 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro + BaseMessages.getString(PKG, "GetStatusServlet.LastLogDate") + " " + BaseMessages.getString(PKG, "GetStatusServlet.Remove") + "
" + BaseMessages.getString(PKG, "GetStatusServlet.Remove") + "
" + name + "" + id + "" + status + "" From 58be3c21342ba94b18ff551b9051174e5143cafc Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 19:50:23 +0800 Subject: [PATCH 14/15] add formatted code baed on Pentaho CE 6.1.0.1-196 --- .../org/pentaho/di/cluster/SlaveServer.java | 1116 +++++++++++++++++ 1 file changed, 1116 insertions(+) create mode 100644 pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java b/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java new file mode 100644 index 0000000..8d6244b --- /dev/null +++ b/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java @@ -0,0 +1,1116 @@ +/*! ****************************************************************************** + * + * Pentaho Data Integration + * + * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com + * + ******************************************************************************* + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + ******************************************************************************/ + +package org.pentaho.di.cluster; + +import org.apache.commons.httpclient.Header; +import org.apache.commons.httpclient.HttpClient; +import org.apache.commons.httpclient.UsernamePasswordCredentials; +import org.apache.commons.httpclient.auth.AuthScope; +import org.apache.commons.httpclient.methods.*; +import org.apache.commons.lang.StringUtils; +import org.pentaho.di.core.Const; +import org.pentaho.di.core.changed.ChangedFlag; +import org.pentaho.di.core.encryption.Encr; +import org.pentaho.di.core.exception.KettleException; +import org.pentaho.di.core.exception.KettleValueException; +import org.pentaho.di.core.logging.LogChannel; +import org.pentaho.di.core.logging.LogChannelInterface; +import org.pentaho.di.core.row.RowMetaInterface; +import org.pentaho.di.core.row.value.ValueMetaString; +import org.pentaho.di.core.variables.VariableSpace; +import org.pentaho.di.core.variables.Variables; +import org.pentaho.di.core.vfs.KettleVFS; +import org.pentaho.di.core.xml.XMLHandler; +import org.pentaho.di.core.xml.XMLInterface; +import org.pentaho.di.i18n.BaseMessages; +import org.pentaho.di.repository.*; +import org.pentaho.di.shared.SharedObjectInterface; +import org.pentaho.di.www.*; +import org.w3c.dom.Document; +import org.w3c.dom.Node; + +import java.io.*; +import java.net.InetAddress; +import java.net.URLEncoder; +import java.text.MessageFormat; +import java.util.*; + +public class SlaveServer extends ChangedFlag implements Cloneable, SharedObjectInterface, VariableSpace, + RepositoryElementInterface, XMLInterface { + private static Class PKG = SlaveServer.class; // for i18n purposes, needed by Translator2!! 
+ + public static final String STRING_SLAVESERVER = "Slave Server"; + + private static final Random RANDOM = new Random(); + + public static final String XML_TAG = "slaveserver"; + + public static final RepositoryObjectType REPOSITORY_ELEMENT_TYPE = RepositoryObjectType.SLAVE_SERVER; + + private static final String HTTP = "http"; + private static final String HTTPS = "https"; + + public static final String SSL_MODE_TAG = "sslMode"; + + private static final int NOT_FOUND_ERROR = 404; + + public static final int KETTLE_CARTE_RETRIES = getNumberOfSlaveServerRetries(); + + public static final int KETTLE_CARTE_RETRY_BACKOFF_INCREMENTS = getBackoffIncrements(); + + private static int getNumberOfSlaveServerRetries() { + try { + return Integer.parseInt(Const.NVL(System.getProperty("KETTLE_CARTE_RETRIES"), "0")); + } catch (Exception e) { + return 0; + } + } + + public static int getBackoffIncrements() { + try { + return Integer.parseInt(Const.NVL(System.getProperty("KETTLE_CARTE_RETRY_BACKOFF_INCREMENTS"), "1000")); + } catch (Exception e) { + return 1000; + } + } + + private LogChannelInterface log; + + private String name; + + private String hostname; + + private String port; + + private String webAppName; + + private String username; + + private String password; + + private String proxyHostname; + + private String proxyPort; + + private String nonProxyHosts; + + private String propertiesMasterName; + + private boolean overrideExistingProperties; + + private boolean master; + + private boolean shared; + + private ObjectId id; + + private VariableSpace variables = new Variables(); + + private ObjectRevision objectRevision; + + private Date changedDate; + + private boolean sslMode; + + private SslConfiguration sslConfig; + + public SlaveServer() { + initializeVariablesFrom(null); + id = null; + this.log = new LogChannel(STRING_SLAVESERVER); + this.changedDate = new Date(); + } + + public SlaveServer(String name, String hostname, String port, String username, String password) { + this(name, hostname, port, username, password, null, null, null, false, false); + } + + public SlaveServer(String name, String hostname, String port, String username, String password, + String proxyHostname, String proxyPort, String nonProxyHosts, boolean master) { + this(name, hostname, port, username, password, proxyHostname, proxyPort, nonProxyHosts, master, false); + } + + public SlaveServer(String name, String hostname, String port, String username, String password, + String proxyHostname, String proxyPort, String nonProxyHosts, boolean master, boolean ssl) { + this(); + this.name = name; + this.hostname = hostname; + this.port = port; + this.username = username; + this.password = password; + + this.proxyHostname = proxyHostname; + this.proxyPort = proxyPort; + this.nonProxyHosts = nonProxyHosts; + + this.master = master; + initializeVariablesFrom(null); + this.log = new LogChannel(this); + } + + public SlaveServer(Node slaveNode) { + this(); + this.name = XMLHandler.getTagValue(slaveNode, "name"); + this.hostname = XMLHandler.getTagValue(slaveNode, "hostname"); + this.port = XMLHandler.getTagValue(slaveNode, "port"); + this.webAppName = XMLHandler.getTagValue(slaveNode, "webAppName"); + this.username = XMLHandler.getTagValue(slaveNode, "username"); + this.password = Encr.decryptPasswordOptionallyEncrypted(XMLHandler.getTagValue(slaveNode, "password")); + this.proxyHostname = XMLHandler.getTagValue(slaveNode, "proxy_hostname"); + this.proxyPort = XMLHandler.getTagValue(slaveNode, "proxy_port"); + this.nonProxyHosts = 
XMLHandler.getTagValue(slaveNode, "non_proxy_hosts"); + this.propertiesMasterName = XMLHandler.getTagValue(slaveNode, "get_properties_from_master"); + this.overrideExistingProperties = + "Y".equalsIgnoreCase(XMLHandler.getTagValue(slaveNode, "override_existing_properties")); + this.master = "Y".equalsIgnoreCase(XMLHandler.getTagValue(slaveNode, "master")); + initializeVariablesFrom(null); + this.log = new LogChannel(this); + + setSslMode("Y".equalsIgnoreCase(XMLHandler.getTagValue(slaveNode, SSL_MODE_TAG))); + Node sslConfig = XMLHandler.getSubNode(slaveNode, SslConfiguration.XML_TAG); + if (sslConfig != null) { + setSslMode(true); + this.sslConfig = new SslConfiguration(sslConfig); + } + } + + public LogChannelInterface getLogChannel() { + return log; + } + + public String getXML() { + StringBuilder xml = new StringBuilder(); + + xml.append("<").append(XML_TAG).append(">"); + + xml.append(XMLHandler.addTagValue("name", name, false)); + xml.append(XMLHandler.addTagValue("hostname", hostname, false)); + xml.append(XMLHandler.addTagValue("port", port, false)); + xml.append(XMLHandler.addTagValue("webAppName", webAppName, false)); + xml.append(XMLHandler.addTagValue("username", username, false)); + xml.append(XMLHandler.addTagValue("password", Encr.encryptPasswordIfNotUsingVariables(password), false)); + xml.append(XMLHandler.addTagValue("proxy_hostname", proxyHostname, false)); + xml.append(XMLHandler.addTagValue("proxy_port", proxyPort, false)); + xml.append(XMLHandler.addTagValue("non_proxy_hosts", nonProxyHosts, false)); + xml.append(XMLHandler.addTagValue("master", master, false)); + xml.append(XMLHandler.addTagValue(SSL_MODE_TAG, isSslMode(), false)); + if (sslConfig != null) { + xml.append(sslConfig.getXML()); + } + + xml.append(""); + + return xml.toString(); + } + + public Object clone() { + SlaveServer slaveServer = new SlaveServer(); + slaveServer.replaceMeta(this); + return slaveServer; + } + + public void replaceMeta(SlaveServer slaveServer) { + this.name = slaveServer.name; + this.hostname = slaveServer.hostname; + this.port = slaveServer.port; + this.webAppName = slaveServer.webAppName; + this.username = slaveServer.username; + this.password = slaveServer.password; + this.proxyHostname = slaveServer.proxyHostname; + this.proxyPort = slaveServer.proxyPort; + this.nonProxyHosts = slaveServer.nonProxyHosts; + this.master = slaveServer.master; + + this.id = slaveServer.id; + this.shared = slaveServer.shared; + this.setChanged(true); + this.sslMode = slaveServer.sslMode; + } + + public String toString() { + return name; + } + + public String getServerAndPort() { + String realHostname = environmentSubstitute(hostname); + if (!Const.isEmpty(realHostname)) { + return realHostname + getPortSpecification(); + } + return "Slave Server"; + } + + public boolean equals(Object obj) { + if (!(obj instanceof SlaveServer)) { + return false; + } + SlaveServer slave = (SlaveServer) obj; + return name.equalsIgnoreCase(slave.getName()); + } + + public int hashCode() { + return name.hashCode(); + } + + public String getHostname() { + return hostname; + } + + public void setHostname(String urlString) { + this.hostname = urlString; + } + + /** + * @return the password + */ + public String getPassword() { + return password; + } + + /** + * @param password the password to set + */ + public void setPassword(String password) { + this.password = password; + } + + /** + * @return the username + */ + public String getUsername() { + return username; + } + + /** + * @param username the username to set + */ + 
public void setUsername(String username) { + this.username = username; + } + + /** + * @return the username + */ + public String getWebAppName() { + return webAppName; + } + + /** + * @param webAppName the web application name to set + */ + public void setWebAppName(String webAppName) { + this.webAppName = webAppName; + } + + /** + * @return the nonProxyHosts + */ + public String getNonProxyHosts() { + return nonProxyHosts; + } + + /** + * @param nonProxyHosts the nonProxyHosts to set + */ + public void setNonProxyHosts(String nonProxyHosts) { + this.nonProxyHosts = nonProxyHosts; + } + + /** + * @return the proxyHostname + */ + public String getProxyHostname() { + return proxyHostname; + } + + /** + * @param proxyHostname the proxyHostname to set + */ + public void setProxyHostname(String proxyHostname) { + this.proxyHostname = proxyHostname; + } + + /** + * @return the proxyPort + */ + public String getProxyPort() { + return proxyPort; + } + + /** + * @param proxyPort the proxyPort to set + */ + public void setProxyPort(String proxyPort) { + this.proxyPort = proxyPort; + } + + /** + * @return the Master name for read properties + */ + public String getPropertiesMasterName() { + return propertiesMasterName; + } + + /** + * @return flag for read properties from Master + */ + public boolean isOverrideExistingProperties() { + return overrideExistingProperties; + } + + /** + * @return the port + */ + public String getPort() { + return port; + } + + /** + * @param port the port to set + */ + public void setPort(String port) { + this.port = port; + } + + public String getPortSpecification() { + String realPort = environmentSubstitute(port); + String portSpec = ":" + realPort; + if (Const.isEmpty(realPort) || port.equals("80")) { + portSpec = ""; + } + return portSpec; + } + + public String constructUrl(String serviceAndArguments) throws UnsupportedEncodingException { + String realHostname = environmentSubstitute(hostname); + if (!StringUtils.isBlank(webAppName)) { + serviceAndArguments = "/" + environmentSubstitute(getWebAppName()) + serviceAndArguments; + } + String retval = (isSslMode() ? 
HTTPS : HTTP) + "://" + realHostname + getPortSpecification() + serviceAndArguments; + retval = Const.replace(retval, " ", "%20"); + return retval; + } + + // Method is defined as package-protected in order to be accessible by unit tests + PostMethod buildSendXMLMethod(byte[] content, String service) throws Exception { + // Prepare HTTP put + // + String urlString = constructUrl(service); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ConnectingTo", urlString)); + } + PostMethod postMethod = new PostMethod(urlString); + + // Request content will be retrieved directly from the input stream + // + RequestEntity entity = new ByteArrayRequestEntity(content); + + postMethod.setRequestEntity(entity); + postMethod.setDoAuthentication(true); + postMethod.addRequestHeader(new Header("Content-Type", "text/xml;charset=" + Const.XML_ENCODING)); + + return postMethod; + } + + public synchronized String sendXML(String xml, String service) throws Exception { + PostMethod method = buildSendXMLMethod(xml.getBytes(Const.XML_ENCODING), service); + + // Execute request + // + try { + int result = getHttpClient().executeMethod(method); + + // The status code + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ResponseStatus", Integer.toString(result))); + } + + String responseBody = getResponseBodyAsString(method.getResponseBodyAsStream()); + + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ResponseBody", responseBody)); + } + + if (result >= 400) { + String message; + if (result == NOT_FOUND_ERROR) { + message = String.format("%s%s%s%s", + BaseMessages.getString(PKG, "SlaveServer.Error.404.Title"), + Const.CR, Const.CR, + BaseMessages.getString(PKG, "SlaveServer.Error.404.Message") + ); + } else { + message = String.format("HTTP Status %d - %s - %s", + method.getStatusCode(), + method.getPath(), + method.getStatusText() + ); + } + throw new KettleException(message); + } + + return responseBody; + } finally { + // Release current connection to the connection pool once you are done + method.releaseConnection(); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "SlaveServer.DETAILED_SentXmlToService", service, + environmentSubstitute(hostname))); + } + } + } + + // Method is defined as package-protected in order to be accessible by unit tests + PostMethod buildSendExportMethod(String type, String load, InputStream is) throws UnsupportedEncodingException { + String serviceUrl = RegisterPackageServlet.CONTEXT_PATH; + if (type != null && load != null) { + serviceUrl += + "/?" 
+ AddExportServlet.PARAMETER_TYPE + "=" + type + "&" + AddExportServlet.PARAMETER_LOAD + "=" + + URLEncoder.encode(load, "UTF-8"); + } + + String urlString = constructUrl(serviceUrl); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ConnectingTo", urlString)); + } + + PostMethod method = new PostMethod(urlString); + method.setRequestEntity(new InputStreamRequestEntity(is)); + method.setDoAuthentication(true); + method.addRequestHeader(new Header("Content-Type", "binary/zip")); + + return method; + } + + /** + * Send an exported archive over to this slave server + * + * @param filename The archive to send + * @param type The type of file to add to the slave server (AddExportServlet.TYPE_*) + * @param load The filename to load in the archive (the .kjb or .ktr) + * @return the XML of the web result + * @throws Exception in case something goes awry + */ + public String sendExport(String filename, String type, String load) throws Exception { + // Request content will be retrieved directly from the input stream + // + InputStream is = null; + try { + is = KettleVFS.getInputStream(KettleVFS.getFileObject(filename)); + + // Execute request + // + PostMethod method = buildSendExportMethod(type, load, is); + try { + int result = getHttpClient().executeMethod(method); + + // The status code + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ResponseStatus", Integer.toString(result))); + } + + String responseBody = getResponseBodyAsString(method.getResponseBodyAsStream()); + + // String body = post.getResponseBodyAsString(); + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ResponseBody", responseBody)); + } + + if (result >= 400) { + throw new KettleException(String.format("HTTP Status %d - %s - %s", method.getStatusCode(), method + .getPath(), method.getStatusText())); + } + + return responseBody; + } finally { + // Release current connection to the connection pool once you are done + method.releaseConnection(); + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "SlaveServer.DETAILED_SentExportToService", + RegisterPackageServlet.CONTEXT_PATH, environmentSubstitute(hostname))); + } + } + } finally { + try { + if (is != null) { + is.close(); + } + } catch (IOException ignored) { + // nothing to do here... 
+ } + } + } + + public void addProxy(HttpClient client) { + String host = environmentSubstitute(this.hostname); + String phost = environmentSubstitute(this.proxyHostname); + String pport = environmentSubstitute(this.proxyPort); + String nonprox = environmentSubstitute(this.nonProxyHosts); + + if (!Const.isEmpty(phost) && !Const.isEmpty(pport)) { + // skip applying proxy if non-proxy host matches + if (!Const.isEmpty(nonprox) && !Const.isEmpty(host) && host.matches(nonprox)) { + return; + } + client.getHostConfiguration().setProxy(phost, Integer.parseInt(pport)); + } + } + + public void addCredentials(HttpClient client) { + client.getState().setCredentials( + new AuthScope(environmentSubstitute(hostname), Const.toInt(environmentSubstitute(port), 80)), + new UsernamePasswordCredentials(environmentSubstitute(username), Encr + .decryptPasswordOptionallyEncrypted(environmentSubstitute(password)))); + client.getParams().setAuthenticationPreemptive(true); + } + + /** + * @return the master + */ + public boolean isMaster() { + return master; + } + + /** + * @param master the master to set + */ + public void setMaster(boolean master) { + this.master = master; + } + + public String execService(String service, boolean retry) throws Exception { + int tries = 0; + int maxRetries = 0; + if (retry) { + maxRetries = KETTLE_CARTE_RETRIES; + } + while (true) { + try { + return execService(service); + } catch (Exception e) { + if (tries >= maxRetries) { + throw e; + } else { + try { + Thread.sleep(getDelay(tries)); + } catch (InterruptedException e2) { + //ignore + } + } + } + tries++; + } + } + + public static long getDelay(int trial) { + long current = KETTLE_CARTE_RETRY_BACKOFF_INCREMENTS; + long previous = 0; + for (int i = 0; i < trial; i++) { + long tmp = current; + current = current + previous; + previous = tmp; + } + return current + RANDOM.nextInt((int) Math.min(Integer.MAX_VALUE, current / 4L)); + } + + public synchronized String execService(String service) throws Exception { + return execService(service, new HashMap()); + } + + // Method is defined as package-protected in order to be accessible by unit tests + String getResponseBodyAsString(InputStream is) throws IOException { + BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(is)); + StringBuilder bodyBuffer = new StringBuilder(); + String line; + + try { + while ((line = bufferedReader.readLine()) != null) { + bodyBuffer.append(line); + } + } finally { + bufferedReader.close(); + } + + return bodyBuffer.toString(); + } + + // Method is defined as package-protected in order to be accessible by unit tests + GetMethod buildExecuteServiceMethod(String service, Map headerValues) + throws UnsupportedEncodingException { + GetMethod method = new GetMethod(constructUrl(service)); + + for (String key : headerValues.keySet()) { + method.setRequestHeader(key, headerValues.get(key)); + } + return method; + } + + public synchronized String execService(String service, Map headerValues) throws Exception { + // Prepare HTTP get + // + GetMethod method = buildExecuteServiceMethod(service, headerValues); + + // Execute request + // + try { + int result = getHttpClient().executeMethod(method); + + // The status code + if (log.isDebug()) { + log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ResponseStatus", Integer.toString(result))); + } + + String responseBody = method.getResponseBodyAsString(); + + if (log.isDetailed()) { + log.logDetailed(BaseMessages.getString(PKG, "SlaveServer.DETAILED_FinishedReading", Integer + 
+ .toString(responseBody.getBytes().length)));
+ }
+ if (log.isDebug()) {
+ log.logDebug(BaseMessages.getString(PKG, "SlaveServer.DEBUG_ResponseBody", responseBody));
+ }
+
+ if (result >= 400) {
+ throw new KettleException(String.format("HTTP Status %d - %s - %s", method.getStatusCode(), method.getPath(),
+ method.getStatusText()));
+ }
+
+ return responseBody;
+ } finally {
+ // Release current connection to the connection pool once you are done
+ method.releaseConnection();
+ if (log.isDetailed()) {
+ log.logDetailed(BaseMessages.getString(PKG, "SlaveServer.DETAILED_ExecutedService", service, hostname));
+ }
+ }
+ }
+
+ // Method is defined as package-protected in order to be accessible by unit tests
+ HttpClient getHttpClient() {
+ HttpClient client = SlaveConnectionManager.getInstance().createHttpClient();
+ addCredentials(client);
+ addProxy(client);
+ return client;
+ }
+
+ public SlaveServerStatus getStatus() throws Exception {
+ String xml = execService(GetStatusServlet.CONTEXT_PATH + "/?xml=Y");
+ return SlaveServerStatus.fromXML(xml);
+ }
+
+ public List<SlaveServerDetection> getSlaveServerDetections() throws Exception {
+ String xml = execService(GetSlavesServlet.CONTEXT_PATH + "/");
+ Document document = XMLHandler.loadXMLString(xml);
+ Node detectionsNode = XMLHandler.getSubNode(document, GetSlavesServlet.XML_TAG_SLAVESERVER_DETECTIONS);
+ int nrDetections = XMLHandler.countNodes(detectionsNode, SlaveServerDetection.XML_TAG);
+
+ List<SlaveServerDetection> detections = new ArrayList<SlaveServerDetection>();
+ for (int i = 0; i < nrDetections; i++) {
+ Node detectionNode = XMLHandler.getSubNodeByNr(detectionsNode, SlaveServerDetection.XML_TAG, i);
+ SlaveServerDetection detection = new SlaveServerDetection(detectionNode);
+ detections.add(detection);
+ }
+ return detections;
+ }
+
+ public SlaveServerTransStatus getTransStatus(String transName, String carteObjectId, int startLogLineNr)
+ throws Exception {
+ String xml =
+ execService(GetTransStatusServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y&from=" + startLogLineNr, true);
+ return SlaveServerTransStatus.fromXML(xml);
+ }
+
+ public SlaveServerJobStatus getJobStatus(String jobName, String carteObjectId, int startLogLineNr) throws Exception {
+ String xml =
+ execService(GetJobStatusServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(jobName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y&from=" + startLogLineNr, true);
+ return SlaveServerJobStatus.fromXML(xml);
+ }
+
+ public WebResult stopTransformation(String transName, String carteObjectId) throws Exception {
+ String xml =
+ execService(StopTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult pauseResumeTransformation(String transName, String carteObjectId) throws Exception {
+ String xml =
+ execService(PauseTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult removeTransformation(String transName, String carteObjectId) throws Exception {
+ String xml =
+ execService(RemoveTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult removeJob(String jobName, String carteObjectId) throws Exception {
+ String xml =
+ execService(RemoveJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(jobName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult stopJob(String transName, String carteObjectId) throws Exception {
+ String xml =
+ execService(StopJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&xml=Y&id="
+ + Const.NVL(carteObjectId, ""));
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult startTransformation(String transName, String carteObjectId) throws Exception {
+ String xml =
+ execService(StartTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult startJob(String jobName, String carteObjectId) throws Exception {
+ String xml =
+ execService(StartJobServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(jobName, "UTF-8") + "&xml=Y&id="
+ + Const.NVL(carteObjectId, ""));
+ return WebResult.fromXMLString(xml);
+ }
+
+ public WebResult cleanupTransformation(String transName, String carteObjectId) throws Exception {
+ String xml =
+ execService(CleanupTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(carteObjectId, "") + "&xml=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public synchronized WebResult deAllocateServerSockets(String transName, String clusteredRunId) throws Exception {
+ String xml =
+ execService(CleanupTransServlet.CONTEXT_PATH + "/?name=" + URLEncoder.encode(transName, "UTF-8") + "&id="
+ + Const.NVL(clusteredRunId, "") + "&xml=Y&sockets=Y");
+ return WebResult.fromXMLString(xml);
+ }
+
+ public Properties getKettleProperties() throws Exception {
+ String xml = execService(GetPropertiesServlet.CONTEXT_PATH + "/?xml=Y");
+ InputStream in = new ByteArrayInputStream(xml.getBytes());
+ Properties properties = new Properties();
+ properties.loadFromXML(in);
+ return properties;
+ }
+
+ public static SlaveServer findSlaveServer(List<SlaveServer> slaveServers, String name) {
+ for (SlaveServer slaveServer : slaveServers) {
+ if (slaveServer.getName() != null && slaveServer.getName().equalsIgnoreCase(name)) {
+ return slaveServer;
+ }
+ }
+ return null;
+ }
+
+ public static SlaveServer findSlaveServer(List<SlaveServer> slaveServers, ObjectId id) {
+ for (SlaveServer slaveServer : slaveServers) {
+ if (slaveServer.getObjectId() != null && slaveServer.getObjectId().equals(id)) {
+ return slaveServer;
+ }
+ }
+ return null;
+ }
+
+ public static String[] getSlaveServerNames(List<SlaveServer> slaveServers) {
+ String[] names = new String[slaveServers.size()];
+ for (int i = 0; i < slaveServers.size(); i++) {
+ SlaveServer slaveServer = slaveServers.get(i);
+ names[i] = slaveServer.getName();
+ }
+ return names;
+ }
+
+ public synchronized int allocateServerSocket(String runId, int portRangeStart, String hostname,
+ String transformationName, String sourceSlaveName,
+ String sourceStepName, String sourceStepCopy,
+ String targetSlaveName, String targetStepName, String targetStepCopy)
+ throws Exception {
+
+ // Look up the IP address of the given hostname
+ // Only this way we'll be able to allocate on the correct host.
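+ // (the resolved IP address, not the raw hostname, is what is passed below as PARAM_HOSTNAME)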
+ // + InetAddress inetAddress = InetAddress.getByName(hostname); + String address = inetAddress.getHostAddress(); + + String service = AllocateServerSocketServlet.CONTEXT_PATH + "/?"; + service += AllocateServerSocketServlet.PARAM_RANGE_START + "=" + Integer.toString(portRangeStart); + service += "&" + AllocateServerSocketServlet.PARAM_ID + "=" + URLEncoder.encode(runId, "UTF-8"); + service += "&" + AllocateServerSocketServlet.PARAM_HOSTNAME + "=" + address; + service += + "&" + AllocateServerSocketServlet.PARAM_TRANSFORMATION_NAME + "=" + + URLEncoder.encode(transformationName, "UTF-8"); + service += + "&" + AllocateServerSocketServlet.PARAM_SOURCE_SLAVE + "=" + URLEncoder.encode(sourceSlaveName, "UTF-8"); + service += + "&" + AllocateServerSocketServlet.PARAM_SOURCE_STEPNAME + "=" + URLEncoder.encode(sourceStepName, "UTF-8"); + service += + "&" + AllocateServerSocketServlet.PARAM_SOURCE_STEPCOPY + "=" + URLEncoder.encode(sourceStepCopy, "UTF-8"); + service += + "&" + AllocateServerSocketServlet.PARAM_TARGET_SLAVE + "=" + URLEncoder.encode(targetSlaveName, "UTF-8"); + service += + "&" + AllocateServerSocketServlet.PARAM_TARGET_STEPNAME + "=" + URLEncoder.encode(targetStepName, "UTF-8"); + service += + "&" + AllocateServerSocketServlet.PARAM_TARGET_STEPCOPY + "=" + URLEncoder.encode(targetStepCopy, "UTF-8"); + service += "&xml=Y"; + String xml = execService(service); + Document doc = XMLHandler.loadXMLString(xml); + String portString = XMLHandler.getTagValue(doc, AllocateServerSocketServlet.XML_TAG_PORT); + + int port = Const.toInt(portString, -1); + if (port < 0) { + throw new Exception("Unable to retrieve port from service : " + service + ", received : \n" + xml); + } + + return port; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public boolean isShared() { + return shared; + } + + public void setShared(boolean shared) { + this.shared = shared; + } + + public void copyVariablesFrom(VariableSpace space) { + variables.copyVariablesFrom(space); + } + + public String environmentSubstitute(String aString) { + return variables.environmentSubstitute(aString); + } + + public String[] environmentSubstitute(String[] aString) { + return variables.environmentSubstitute(aString); + } + + public String fieldSubstitute(String aString, RowMetaInterface rowMeta, Object[] rowData) + throws KettleValueException { + return variables.fieldSubstitute(aString, rowMeta, rowData); + } + + public VariableSpace getParentVariableSpace() { + return variables.getParentVariableSpace(); + } + + public void setParentVariableSpace(VariableSpace parent) { + variables.setParentVariableSpace(parent); + } + + public String getVariable(String variableName, String defaultValue) { + return variables.getVariable(variableName, defaultValue); + } + + public String getVariable(String variableName) { + return variables.getVariable(variableName); + } + + public boolean getBooleanValueOfVariable(String variableName, boolean defaultValue) { + if (!Const.isEmpty(variableName)) { + String value = environmentSubstitute(variableName); + if (!Const.isEmpty(value)) { + return ValueMetaString.convertStringToBoolean(value); + } + } + return defaultValue; + } + + public void initializeVariablesFrom(VariableSpace parent) { + variables.initializeVariablesFrom(parent); + } + + public String[] listVariables() { + return variables.listVariables(); + } + + public void setVariable(String variableName, String variableValue) { + variables.setVariable(variableName, variableValue); 
+ }
+
+ public void shareVariablesWith(VariableSpace space) {
+ variables = space;
+ }
+
+ public void injectVariables(Map<String, String> prop) {
+ variables.injectVariables(prop);
+ }
+
+ public ObjectId getObjectId() {
+ return id;
+ }
+
+ public void setObjectId(ObjectId id) {
+ this.id = id;
+ }
+
+ /**
+ * Not used in this case, simply return root /
+ */
+ public RepositoryDirectoryInterface getRepositoryDirectory() {
+ return new RepositoryDirectory();
+ }
+
+ public void setRepositoryDirectory(RepositoryDirectoryInterface repositoryDirectory) {
+ throw new RuntimeException("Setting a directory on a database connection is not supported");
+ }
+
+ public RepositoryObjectType getRepositoryElementType() {
+ return REPOSITORY_ELEMENT_TYPE;
+ }
+
+ public ObjectRevision getObjectRevision() {
+ return objectRevision;
+ }
+
+ public void setObjectRevision(ObjectRevision objectRevision) {
+ this.objectRevision = objectRevision;
+ }
+
+ public String getDescription() {
+ // NOT USED
+ return null;
+ }
+
+ public void setDescription(String description) {
+ // NOT USED
+ }
+
+ /**
+ * Verify the name of the slave server and if required, change it if it already exists in the list of slave servers.
+ *
+ * @param slaveServers the slave servers to check against.
+ * @param oldname the old name of the slave server
+ * @return the new slave server name
+ */
+ public String verifyAndModifySlaveServerName(List<SlaveServer> slaveServers, String oldname) {
+ String name = getName();
+ if (name.equalsIgnoreCase(oldname)) {
+ return name; // nothing to see here: move along!
+ }
+
+ int nr = 2;
+ while (SlaveServer.findSlaveServer(slaveServers, getName()) != null) {
+ setName(name + " " + nr);
+ nr++;
+ }
+ return getName();
+ }
+
+ /**
+ * Sniff rows on the slave server, return xml containing the row metadata and data.
+ *
+ * @param transName transformation name
+ * @param stepName step name
+ * @param copyNr step copy number
+ * @param lines lines number
+ * @param type step type
+ * @return xml with row metadata and data
+ * @throws Exception
+ */
+ public String sniffStep(String transName, String stepName, String copyNr, int lines, String type) throws Exception {
+ return execService(SniffStepServlet.CONTEXT_PATH + "/?trans=" + URLEncoder.encode(transName, "UTF-8") + "&step="
+ + URLEncoder.encode(stepName, "UTF-8") + "&copynr=" + copyNr + "&type=" + type + "&lines=" + lines + "&xml=Y");
+ }
+
+ public long getNextSlaveSequenceValue(String slaveSequenceName, long incrementValue) throws KettleException {
+ try {
+ String xml =
+ execService(NextSequenceValueServlet.CONTEXT_PATH + "/" + "?"
+ NextSequenceValueServlet.PARAM_NAME + "=" + + URLEncoder.encode(slaveSequenceName, "UTF-8") + "&" + NextSequenceValueServlet.PARAM_INCREMENT + "=" + + Long.toString(incrementValue)); + + Document doc = XMLHandler.loadXMLString(xml); + Node seqNode = XMLHandler.getSubNode(doc, NextSequenceValueServlet.XML_TAG); + String nextValueString = XMLHandler.getTagValue(seqNode, NextSequenceValueServlet.XML_TAG_VALUE); + String errorString = XMLHandler.getTagValue(seqNode, NextSequenceValueServlet.XML_TAG_ERROR); + + if (!Const.isEmpty(errorString)) { + throw new KettleException(errorString); + } + if (Const.isEmpty(nextValueString)) { + throw new KettleException("No value retrieved from slave sequence '" + slaveSequenceName + "' on slave " + + toString()); + } + long nextValue = Const.toLong(nextValueString, Long.MIN_VALUE); + if (nextValue == Long.MIN_VALUE) { + throw new KettleException("Incorrect value '" + nextValueString + "' retrieved from slave sequence '" + + slaveSequenceName + "' on slave " + toString()); + } + + return nextValue; + } catch (Exception e) { + throw new KettleException("There was a problem retrieving a next sequence value from slave sequence '" + + slaveSequenceName + "' on slave " + toString(), e); + } + } + + public SlaveServer getClient() { + String pHostName = getHostname(); + String pPort = getPort(); + String name = MessageFormat.format("Dynamic slave [{0}:{1}]", pHostName, pPort); + SlaveServer client = new SlaveServer(name, pHostName, "" + pPort, getUsername(), getPassword()); + client.setSslMode(isSslMode()); + return client; + } + + /** + * @return the changedDate + */ + public Date getChangedDate() { + return changedDate; + } + + /** + * @param sslMode + */ + public void setSslMode(boolean sslMode) { + this.sslMode = sslMode; + } + + /** + * @return the sslMode + */ + public boolean isSslMode() { + return sslMode; + } + + /** + * @return the sslConfig + */ + public SslConfiguration getSslConfig() { + return sslConfig; + } +} From b329da94fac829722cea5d0696cb39f428da7037 Mon Sep 17 00:00:00 2001 From: Zhichun Wu Date: Sun, 28 Aug 2016 19:54:56 +0800 Subject: [PATCH 15/15] Add new option to force SSL communication between master and slave --- .../java/org/pentaho/di/cluster/SlaveServer.java | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java b/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java index 8d6244b..375b912 100644 --- a/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java +++ b/pentaho-kettle/src/main/java/org/pentaho/di/cluster/SlaveServer.java @@ -78,6 +78,8 @@ public class SlaveServer extends ChangedFlag implements Cloneable, SharedObjectI public static final int KETTLE_CARTE_RETRY_BACKOFF_INCREMENTS = getBackoffIncrements(); + public static final boolean KETTLE_FORCED_SSL = getForcedSsl(); + private static int getNumberOfSlaveServerRetries() { try { return Integer.parseInt(Const.NVL(System.getProperty("KETTLE_CARTE_RETRIES"), "0")); @@ -94,6 +96,18 @@ public static int getBackoffIncrements() { } } + public static boolean getForcedSsl() + { + try + { + return "Y".equalsIgnoreCase(System.getProperty("KETTLE_FORCED_SSL")); + } + catch(Exception e) + { + return false; + } + } + private LogChannelInterface log; private String name; @@ -1104,7 +1118,7 @@ public void setSslMode(boolean sslMode) { * @return the sslMode */ public boolean isSslMode() { - return sslMode; + return KETTLE_FORCED_SSL || sslMode; } /**
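
Note on the new option: KETTLE_FORCED_SSL is read from a JVM system property and captured once in a static initializer, so it must be set before the SlaveServer class is loaded, typically via -DKETTLE_FORCED_SSL=Y on the Carte or Spoon command line. Below is a minimal sketch of how the flag could be exercised; the class name, host, port and credentials are placeholders, not values taken from this patch.

    import org.pentaho.di.cluster.SlaveServer;

    public class ForcedSslCheck {
        public static void main(String[] args) {
            // Must run before the SlaveServer class is initialized, otherwise the
            // static KETTLE_FORCED_SSL flag has already been captured.
            System.setProperty("KETTLE_FORCED_SSL", "Y");

            // Five-argument constructor, as used by SlaveServer.getClient() above;
            // hostname, port and credentials are placeholders.
            SlaveServer server = new SlaveServer("slave-1", "carte-host", "8443", "cluster", "cluster");
            System.out.println(server.isSslMode()); // true, regardless of the per-server sslMode flag
        }
    }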