idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
34,100 | private String getSchemaStringFromFile ( File schemaFile ) { String schemaString ; FileInputStream fis = null ; try { fis = new FileInputStream ( schemaFile ) ; schemaString = AvroUtils . inputStreamToString ( fis ) ; } catch ( IOException e ) { throw new DatasetException ( e ) ; } finally { if ( fis != null ) { try { fis . close ( ) ; } catch ( IOException e ) { } } } return schemaString ; } | Will return the contents of schemaFile as a string |
34,101 | private List < String > getSchemaStringsFromDir ( File dir ) { List < String > schemaStrings = new ArrayList < String > ( ) ; Collection < File > schemaFiles = FileUtils . listFiles ( dir , new SuffixFileFilter ( ".avsc" ) , TrueFileFilter . INSTANCE ) ; for ( File schemaFile : schemaFiles ) { schemaStrings . add ( getSchemaStringFromFile ( schemaFile ) ) ; } return schemaStrings ; } | Gets the list of HBase Common Avro schema strings from dir . It recursively searches dir to find files that end in . avsc to locate those strings . |
34,102 | private List < String > getSchemaStringsFromJar ( String jarPath , String directoryPath ) { LOG . info ( "Getting schema strings in: " + directoryPath + ", from jar: " + jarPath ) ; JarFile jar ; try { jar = new JarFile ( URLDecoder . decode ( jarPath , "UTF-8" ) ) ; } catch ( UnsupportedEncodingException e ) { throw new DatasetException ( e ) ; } catch ( IOException e ) { throw new DatasetException ( e ) ; } Enumeration < JarEntry > entries = jar . entries ( ) ; List < String > schemaStrings = new ArrayList < String > ( ) ; while ( entries . hasMoreElements ( ) ) { JarEntry jarEntry = entries . nextElement ( ) ; if ( jarEntry . getName ( ) . startsWith ( directoryPath ) && jarEntry . getName ( ) . endsWith ( ".avsc" ) ) { LOG . info ( "Found schema: " + jarEntry . getName ( ) ) ; InputStream inputStream ; try { inputStream = jar . getInputStream ( jarEntry ) ; } catch ( IOException e ) { throw new DatasetException ( e ) ; } String schemaString = AvroUtils . inputStreamToString ( inputStream ) ; schemaStrings . add ( schemaString ) ; } } return schemaStrings ; } | Gets the list of HBase Common Avro schema strings from a directory in the Jar . It recursively searches the directory in the jar to find files that end in . avsc to locate those strings . |
34,103 | private static void setMetaStoreURI ( Configuration conf , Map < String , String > match ) { try { String host = match . get ( URIPattern . HOST ) ; if ( host != null && ! NOT_SET . equals ( host ) ) { int port ; try { port = Integer . parseInt ( match . get ( URIPattern . PORT ) ) ; } catch ( NumberFormatException e ) { port = UNSPECIFIED_PORT ; } conf . set ( HIVE_METASTORE_URI_PROP , new URI ( "thrift" , null , host , port , null , null , null ) . toString ( ) ) ; } } catch ( URISyntaxException ex ) { throw new DatasetOperationException ( "Could not build metastore URI" , ex ) ; } } | Sets the MetaStore URI in the given Configuration if there is a host in the match arguments . If there is no host then the conf is not changed . |
34,104 | public void copy ( boolean isFragmentMode ) throws XMLStreamException { int ev = isFragmentMode ? XMLStreamConstants . START_ELEMENT : XMLStreamConstants . START_DOCUMENT ; reader . require ( ev , null , null ) ; int depth = 0 ; ev = reader . getEventType ( ) ; while ( true ) { switch ( ev ) { case XMLStreamConstants . START_ELEMENT : { writer . writeStartElement ( nonNull ( reader . getPrefix ( ) ) , reader . getLocalName ( ) , nonNull ( reader . getNamespaceURI ( ) ) ) ; copyAttributes ( ) ; copyNamespaces ( ) ; depth ++ ; break ; } case XMLStreamConstants . END_ELEMENT : { writer . writeEndElement ( ) ; depth -- ; if ( isFragmentMode && depth == 0 ) { writer . flush ( ) ; return ; } break ; } case XMLStreamConstants . ATTRIBUTE : { copyAttribute ( 0 ) ; break ; } case XMLStreamConstants . START_DOCUMENT : { copyStartDocument ( ) ; break ; } case XMLStreamConstants . END_DOCUMENT : { writer . writeEndDocument ( ) ; writer . flush ( ) ; return ; } case XMLStreamConstants . PROCESSING_INSTRUCTION : { writer . writeProcessingInstruction ( reader . getPITarget ( ) , reader . getPIData ( ) ) ; break ; } case XMLStreamConstants . COMMENT : { writer . writeComment ( reader . getText ( ) ) ; break ; } case XMLStreamConstants . CDATA : { writer . writeCData ( reader . getText ( ) ) ; break ; } case XMLStreamConstants . SPACE : case XMLStreamConstants . CHARACTERS : { copyText ( ) ; break ; } case XMLStreamConstants . ENTITY_REFERENCE : { copyText ( ) ; break ; } case XMLStreamConstants . DTD : { copyDTD ( ) ; break ; } case XMLStreamConstants . ENTITY_DECLARATION : break ; case XMLStreamConstants . NOTATION_DECLARATION : break ; case XMLStreamConstants . NAMESPACE : { writer . writeNamespace ( reader . getPrefix ( ) , reader . getNamespaceURI ( ) ) ; break ; } default : { throw new XMLStreamException ( "Unrecognized event type: " + reader . getEventType ( ) ) ; } } ev = reader . next ( ) ; } } | Reads all events from the reader and pipes them into the writer . |
34,105 | public static Put mergePuts ( byte [ ] keyBytes , List < Put > putList ) { Put put = new Put ( keyBytes ) ; for ( Put putToMerge : putList ) { Map < byte [ ] , List < KeyValue > > familyMap = ( Map < byte [ ] , List < KeyValue > > ) GET_FAMILY_MAP_METHOD . invoke ( putToMerge ) ; for ( List < KeyValue > keyValueList : familyMap . values ( ) ) { for ( KeyValue keyValue : keyValueList ) { put . add ( keyValue . getFamily ( ) , keyValue . getQualifier ( ) , keyValue . getTimestamp ( ) , keyValue . getValue ( ) ) ; } } } return put ; } | Given a list of puts create a new put with the values in each put merged together . It is expected that no puts have a value for the same fully qualified column . Return the new put . |
34,106 | public static PutAction mergePutActions ( byte [ ] keyBytes , List < PutAction > putActionList ) { VersionCheckAction checkAction = null ; List < Put > putsToMerge = new ArrayList < Put > ( ) ; for ( PutAction putActionToMerge : putActionList ) { putsToMerge . add ( putActionToMerge . getPut ( ) ) ; VersionCheckAction checkActionToMerge = putActionToMerge . getVersionCheckAction ( ) ; if ( checkActionToMerge != null ) { checkAction = checkActionToMerge ; } } Put put = mergePuts ( keyBytes , putsToMerge ) ; return new PutAction ( put , checkAction ) ; } | Given a list of PutActions create a new PutAction with the values in each put merged together . It is expected that no puts have a value for the same fully qualified column . Return the new PutAction . |
34,107 | private static void addColumnsToOperation ( Collection < String > columns , Operation operation ) { Set < String > familySet = new HashSet < String > ( ) ; for ( String column : columns ) { String [ ] familyAndColumn = column . split ( ":" ) ; if ( familyAndColumn . length == 1 ) { familySet . add ( familyAndColumn [ 0 ] ) ; operation . addFamily ( Bytes . toBytes ( familyAndColumn [ 0 ] ) ) ; } else { if ( ! familySet . contains ( familyAndColumn [ 0 ] ) ) { operation . addColumn ( Bytes . toBytes ( familyAndColumn [ 0 ] ) , Bytes . toBytes ( familyAndColumn [ 1 ] ) ) ; } } } } | Add a Collection of Columns to an Operation Only Add Single Columns If Their Family Isn t Already Being Added . |
34,108 | public static void addColumnsToScan ( Collection < String > columns , final Scan scan ) { addColumnsToOperation ( columns , new Operation ( ) { public void addColumn ( byte [ ] family , byte [ ] column ) { scan . addColumn ( family , column ) ; } public void addFamily ( byte [ ] family ) { scan . addFamily ( family ) ; } } ) ; } | Add a Collection of Columns to a Scanner Only Add Single Columns If Their Family Isn t Already Being Added . |
34,109 | public static void addColumnsToGet ( Collection < String > columns , final Get get ) { addColumnsToOperation ( columns , new Operation ( ) { public void addColumn ( byte [ ] family , byte [ ] column ) { get . addColumn ( family , column ) ; } public void addFamily ( byte [ ] family ) { get . addFamily ( family ) ; } } ) ; } | Add a Collection of Columns to a Get Only Add Single Columns If Their Family Isn t Already Being Added . |
34,110 | protected void setupProperties ( XMLInputFactory factory ) { factory . setProperty ( XMLInputFactory . IS_NAMESPACE_AWARE , Boolean . TRUE ) ; factory . setProperty ( XMLInputFactory . IS_COALESCING , Boolean . TRUE ) ; factory . setProperty ( XMLInputFactory . SUPPORT_DTD , Boolean . TRUE ) ; try { factory . setProperty ( XMLInputFactory . IS_VALIDATING , Boolean . FALSE ) ; } catch ( IllegalArgumentException e ) { ; } try { factory . setProperty ( XMLInputFactory . IS_SUPPORTING_EXTERNAL_ENTITIES , Boolean . TRUE ) ; } catch ( IllegalArgumentException e ) { ; } factory . setXMLResolver ( new XMLResolver ( ) { public InputStream resolveEntity ( String publicID , String systemID , String baseURI , String namespace ) { return new InputStream ( ) { public int read ( ) { return - 1 ; } } ; } } ) ; String factoryName = factory . getClass ( ) . getName ( ) ; if ( factoryName . equals ( "com.ctc.wstx.stax.WstxInputFactory" ) ) { try { String P_LAZY_PARSING = "com.ctc.wstx.lazyParsing" ; factory . setProperty ( P_LAZY_PARSING , Boolean . FALSE ) ; } catch ( IllegalArgumentException e ) { ; } try { String P_CACHE_DTDS = "com.ctc.wstx.cacheDTDs" ; factory . setProperty ( P_CACHE_DTDS , Boolean . valueOf ( CACHE_DTDS ) ) ; } catch ( IllegalArgumentException e ) { ; } } else if ( factoryName . equals ( "com.sun.xml.stream.ZephyrParserFactory" ) ) { try { String P_REPORT_CDATA = "report-cdata-event" ; factory . setProperty ( P_REPORT_CDATA , Boolean . TRUE ) ; } catch ( IllegalArgumentException e ) { ; } } } | Initializes default parser properties if any . |
34,111 | private Path pathForMetadata ( String namespace , String name ) { return pathForMetadata ( rootDirectory , namespace , name ) ; } | Returns the path where this MetadataProvider will store metadata . |
34,112 | private static Path pathForMetadata ( Path root , String namespace , String name ) { return new Path ( FileSystemDatasetRepository . pathForDataset ( root , namespace , name ) , METADATA_DIRECTORY ) ; } | Returns the correct metadata path for the given dataset . |
34,113 | private static void checkExists ( FileSystem fs , Path location ) { try { if ( ! fs . exists ( location ) ) { throw new DatasetNotFoundException ( "Descriptor location does not exist: " + location ) ; } } catch ( IOException ex ) { throw new DatasetIOException ( "Cannot access descriptor location: " + location , ex ) ; } } | Precondition - style static validation that a dataset exists |
34,114 | public static < K extends SpecificRecord , S extends SpecificRecord > Dao < Map < String , S > > buildCompositeDaoWithEntityManager ( HTablePool tablePool , String tableName , List < Class < S > > subEntityClasses , SchemaManager schemaManager ) { List < EntityMapper < S > > entityMappers = new ArrayList < EntityMapper < S > > ( ) ; for ( Class < S > subEntityClass : subEntityClasses ) { String entityName = getSchemaFromEntityClass ( subEntityClass ) . getName ( ) ; entityMappers . add ( new VersionedAvroEntityMapper . Builder ( ) . setSchemaManager ( schemaManager ) . setTableName ( tableName ) . setEntityName ( entityName ) . setSpecific ( true ) . < S > build ( ) ) ; } return new SpecificMapCompositeAvroDao < S > ( tablePool , tableName , entityMappers ) ; } | Create a CompositeDao which will return SpecificRecord instances in a Map container . |
34,115 | private boolean advance ( ) { while ( iterators . size ( ) < depth || ! iterators . getLast ( ) . hasNext ( ) ) { if ( iterators . getLast ( ) . hasNext ( ) ) { current . add ( iterators . getLast ( ) . next ( ) ) ; iterators . add ( getLevel ( current ) . iterator ( ) ) ; } else { iterators . removeLast ( ) ; if ( iterators . isEmpty ( ) ) { return false ; } else { current . removeLast ( ) ; } } } return true ; } | Advance the iterator stack to the next item or return false if there are none left . |
34,116 | private static FormatBundle < DatasetKeyInputFormat > inputBundle ( Configuration conf ) { FormatBundle < DatasetKeyInputFormat > bundle = FormatBundle . forInput ( DatasetKeyInputFormat . class ) ; for ( Map . Entry < String , String > entry : conf ) { bundle . set ( entry . getKey ( ) , entry . getValue ( ) ) ; } return bundle ; } | Builds a FormatBundle for DatasetKeyInputFormat by copying a temp config . |
34,117 | public < T extends Enum < T > > T validateEnum ( Config config , String value , Class < T > type , T ... choices ) { if ( choices . length == 0 ) { choices = type . getEnumConstants ( ) ; } Preconditions . checkArgument ( choices . length > 0 ) ; try { T result = Enum . valueOf ( type , value ) ; if ( ! Arrays . asList ( choices ) . contains ( result ) ) { throw new IllegalArgumentException ( ) ; } return result ; } catch ( IllegalArgumentException e ) { throw new MorphlineCompilationException ( String . format ( "Invalid choice: '%s' (choose from {%s})" , value , Joiner . on ( "," ) . join ( choices ) ) , config ) ; } } | Validates that an enum of the given type with the given value exists and that this enum is contained in the given list of permitted choices ; finally returns that enum object . |
34,118 | public void add ( double value ) { count ++ ; min = Math . min ( min , value ) ; max = Math . max ( max , value ) ; sum += value ; sumOfSquares += value * value ; addQuantileValue ( value ) ; } | Adds the given data value to the data set |
34,119 | public void add ( ScalableStatistics other ) { count += other . count ; min = Math . min ( min , other . min ) ; max = Math . max ( max , other . max ) ; sum += other . sum ; sumOfSquares += other . sumOfSquares ; tdigest . add ( other . tdigest ) ; if ( other . exactValues != null ) { for ( int i = 0 ; i < other . numExactValues ; i ++ ) { addQuantileValue ( other . exactValues [ i ] ) ; } } } | Merges another statistics instance into this instance . For example this way the independent stats and quantiles of each separate MapReduce task can be merged into a single overall job summary statistic . |
34,120 | public byte [ ] asBytes ( ) { byte [ ] className = tdigest . getClass ( ) . getName ( ) . getBytes ( Charsets . UTF_8 ) ; int vlen = exactValues == null ? 0 : numExactValues ; ByteBuffer buf = ByteBuffer . allocate ( 4 + 8 * 5 + 4 + 4 + 8 * vlen + 4 + className . length + tdigest . byteSize ( ) + 4 ) ; buf . putInt ( MAGIC_CODE ) ; buf . putLong ( count ) ; buf . putDouble ( min ) ; buf . putDouble ( max ) ; buf . putDouble ( sum ) ; buf . putDouble ( sumOfSquares ) ; buf . putInt ( exactQuantilesThreshold ) ; buf . putInt ( exactValues == null ? - 1 : numExactValues ) ; if ( exactValues != null ) { for ( int i = 0 ; i < numExactValues ; i ++ ) { buf . putDouble ( exactValues [ i ] ) ; } } buf . putInt ( className . length ) ; buf . put ( className ) ; tdigest . asBytes ( buf ) ; buf . putInt ( MAGIC_CODE ) ; Preconditions . checkArgument ( buf . position ( ) == buf . capacity ( ) ) ; return buf . array ( ) ; } | Serializes the data structure into a byte array |
34,121 | public static ScalableStatistics fromBytes ( byte [ ] bytes ) { Preconditions . checkArgument ( bytes . length > 0 ) ; ByteBuffer buf = ByteBuffer . wrap ( bytes ) ; ScalableStatistics stats = new ScalableStatistics ( ) ; Preconditions . checkArgument ( buf . getInt ( ) == MAGIC_CODE ) ; stats . count = buf . getLong ( ) ; stats . min = buf . getDouble ( ) ; stats . max = buf . getDouble ( ) ; stats . sum = buf . getDouble ( ) ; stats . sumOfSquares = buf . getDouble ( ) ; stats . exactQuantilesThreshold = buf . getInt ( ) ; int vlen = buf . getInt ( ) ; if ( vlen >= 0 ) { stats . numExactValues = vlen ; stats . exactValues = new double [ vlen ] ; for ( int i = 0 ; i < vlen ; i ++ ) { stats . exactValues [ i ] = buf . getDouble ( ) ; } } else { stats . exactValues = null ; } byte [ ] classNameBytes = new byte [ buf . getInt ( ) ] ; buf . get ( classNameBytes ) ; String className = new String ( classNameBytes , Charsets . UTF_8 ) ; try { Method method = Class . forName ( className ) . getMethod ( "fromBytes" , ByteBuffer . class ) ; Preconditions . checkArgument ( Modifier . isStatic ( method . getModifiers ( ) ) ) ; stats . tdigest = ( TDigest ) method . invoke ( null , buf ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } Preconditions . checkArgument ( buf . getInt ( ) == MAGIC_CODE ) ; Preconditions . checkArgument ( buf . position ( ) == buf . capacity ( ) ) ; return stats ; } | Deserializes the data structure from a byte array |
34,122 | private void createMd5SumFile ( FileSystem fs , String md5sum , Path remoteMd5Path ) throws IOException { FSDataOutputStream os = null ; try { os = fs . create ( remoteMd5Path , true ) ; os . writeBytes ( md5sum ) ; os . flush ( ) ; } catch ( Exception e ) { LOG . error ( "{}" , e ) ; } finally { if ( os != null ) { os . close ( ) ; } } } | This method creates a file that contains a line with an MD5 sum |
34,123 | private static Configuration configureHBaseCluster ( Configuration config , int zkClientPort , FileSystem hdfsFs , String bindIP , int masterPort , int regionserverPort ) throws IOException { config . set ( HConstants . ZOOKEEPER_CLIENT_PORT , Integer . toString ( zkClientPort ) ) ; Path hbaseDir = new Path ( hdfsFs . makeQualified ( hdfsFs . getHomeDirectory ( ) ) , "hbase" ) ; FSUtils . setRootDir ( config , hbaseDir ) ; hdfsFs . mkdirs ( hbaseDir ) ; config . set ( "fs.defaultFS" , hdfsFs . getUri ( ) . toString ( ) ) ; config . set ( "fs.default.name" , hdfsFs . getUri ( ) . toString ( ) ) ; FSUtils . setVersion ( hdfsFs , hbaseDir ) ; logger . info ( "HBase force binding to ip: " + bindIP ) ; config . set ( "hbase.master.ipc.address" , bindIP ) ; config . set ( HConstants . MASTER_PORT , Integer . toString ( masterPort ) ) ; config . set ( "hbase.regionserver.ipc.address" , bindIP ) ; config . set ( HConstants . REGIONSERVER_PORT , Integer . toString ( regionserverPort ) ) ; config . set ( HConstants . ZOOKEEPER_QUORUM , bindIP ) ; try { Field cachedHostname = DNS . class . getDeclaredField ( "cachedHostname" ) ; cachedHostname . setAccessible ( true ) ; Field modifiersField = Field . class . getDeclaredField ( "modifiers" ) ; modifiersField . setAccessible ( true ) ; modifiersField . setInt ( cachedHostname , cachedHostname . getModifiers ( ) & ~ Modifier . FINAL ) ; cachedHostname . set ( null , bindIP ) ; } catch ( Exception e ) { throw new IOException ( e ) ; } config . set ( HConstants . MASTER_INFO_PORT , "-1" ) ; config . set ( HConstants . REGIONSERVER_INFO_PORT , "-1" ) ; return config ; } | Configure the HBase cluster before launching it |
34,124 | private static void waitForHBaseToComeOnline ( MiniHBaseCluster hbaseCluster ) throws IOException , InterruptedException { while ( ! hbaseCluster . getMaster ( ) . isInitialized ( ) ) { Thread . sleep ( 1000 ) ; } hbaseCluster . getRegionServer ( 0 ) . waitForServerOnline ( ) ; HTable t = new HTable ( hbaseCluster . getConf ( ) , HBASE_META_TABLE ) ; ResultScanner s = t . getScanner ( new Scan ( ) ) ; while ( s . next ( ) != null ) { continue ; } s . close ( ) ; t . close ( ) ; } | Wait for the hbase cluster to start up and come online and then return . |
34,125 | public void signalReady ( Constraints viewConstraints ) { try { rootFileSystem . mkdirs ( signalDirectory ) ; } catch ( IOException e ) { throw new DatasetIOException ( "Unable to create signal manager directory: " + signalDirectory , e ) ; } String normalizedConstraints = getNormalizedConstraints ( viewConstraints ) ; Path signalPath = new Path ( signalDirectory , normalizedConstraints ) ; try { FSDataOutputStream os = rootFileSystem . create ( signalPath , true ) ; os . close ( ) ; } catch ( IOException e ) { throw new DatasetIOException ( "Could not access signal path: " + signalPath , e ) ; } } | Create a signal for the specified constraints . |
34,126 | public long getReadyTimestamp ( Constraints viewConstraints ) { String normalizedConstraints = getNormalizedConstraints ( viewConstraints ) ; Path signalPath = new Path ( signalDirectory , normalizedConstraints ) ; try { try { FileStatus signalStatus = rootFileSystem . getFileStatus ( signalPath ) ; return signalStatus . getModificationTime ( ) ; } catch ( final FileNotFoundException ex ) { } return - 1 ; } catch ( IOException e ) { throw new DatasetIOException ( "Could not access signal path: " + signalPath , e ) ; } } | Check the last time the specified constraints have been signaled as ready . |
34,127 | View < E > viewForUri ( URI location ) { Preconditions . checkNotNull ( location , "Partition location cannot be null" ) ; PartitionView < E > view = getPartitionView ( location ) ; if ( view == unbounded ) { return this ; } return view ; } | needed to preserve the behavior of FileSystemDatasets |
34,128 | protected boolean isExternal ( String namespace , String name ) { String resolved = resolveNamespace ( namespace , name ) ; if ( resolved != null ) { return isExternal ( getMetaStoreUtil ( ) . getTable ( resolved , name ) ) ; } return false ; } | Returns whether the table is a managed hive table . |
34,129 | private boolean isNamespace ( String database ) { Collection < String > tables = getMetaStoreUtil ( ) . getAllTables ( database ) ; for ( String name : tables ) { if ( isReadable ( database , name ) ) { return true ; } } return false ; } | Returns true if there is at least one table in the given database that can be read . |
34,130 | private boolean isReadable ( String namespace , String name ) { Table table = getMetaStoreUtil ( ) . getTable ( namespace , name ) ; if ( isManaged ( table ) || isExternal ( table ) ) { try { HiveUtils . descriptorForTable ( conf , table ) ; return true ; } catch ( DatasetException e ) { } catch ( IllegalStateException e ) { } catch ( IllegalArgumentException e ) { } catch ( UnsupportedOperationException e ) { } } return false ; } | Returns true if the given table exists and can be read by this library . |
34,131 | public synchronized void append ( LoggingEvent event ) throws FlumeException { if ( rpcClient == null ) { throw new FlumeException ( "Cannot Append to Appender!" + "Appender either closed or not setup correctly!" ) ; } if ( ! rpcClient . isActive ( ) ) { reconnect ( ) ; } Map < String , String > hdrs = new HashMap < String , String > ( ) ; hdrs . put ( Log4jAvroHeaders . LOGGER_NAME . toString ( ) , event . getLoggerName ( ) ) ; hdrs . put ( Log4jAvroHeaders . TIMESTAMP . toString ( ) , String . valueOf ( event . getTimeStamp ( ) ) ) ; hdrs . put ( Log4jAvroHeaders . LOG_LEVEL . toString ( ) , String . valueOf ( event . getLevel ( ) . toInt ( ) ) ) ; Event flumeEvent ; Object message = event . getMessage ( ) ; if ( message instanceof GenericRecord ) { GenericRecord record = ( GenericRecord ) message ; populateAvroHeaders ( hdrs , record . getSchema ( ) , message ) ; flumeEvent = EventBuilder . withBody ( serialize ( record , record . getSchema ( ) ) , hdrs ) ; } else if ( message instanceof SpecificRecord || avroReflectionEnabled ) { Schema schema = ReflectData . get ( ) . getSchema ( message . getClass ( ) ) ; populateAvroHeaders ( hdrs , schema , message ) ; flumeEvent = EventBuilder . withBody ( serialize ( message , schema ) , hdrs ) ; } else { hdrs . put ( Log4jAvroHeaders . MESSAGE_ENCODING . toString ( ) , "UTF8" ) ; String msg = layout != null ? layout . format ( event ) : message . toString ( ) ; flumeEvent = EventBuilder . withBody ( msg , Charset . forName ( "UTF8" ) , hdrs ) ; } try { rpcClient . append ( flumeEvent ) ; } catch ( EventDeliveryException e ) { String msg = "Flume append() failed." ; LogLog . error ( msg ) ; throw new FlumeException ( msg + " Exception follows." , e ) ; } } | Append the LoggingEvent to send to the first Flume hop . |
34,132 | public static String getJar ( Class < ? > klass ) { Preconditions . checkNotNull ( klass , "klass" ) ; ClassLoader loader = klass . getClassLoader ( ) ; if ( loader != null ) { String class_file = klass . getName ( ) . replaceAll ( "\\." , "/" ) + ".class" ; try { for ( Enumeration < ? > itr = loader . getResources ( class_file ) ; itr . hasMoreElements ( ) ; ) { URL url = ( URL ) itr . nextElement ( ) ; String path = url . getPath ( ) ; if ( path . startsWith ( "file:" ) ) { path = path . substring ( "file:" . length ( ) ) ; } path = URLDecoder . decode ( path , "UTF-8" ) ; if ( "jar" . equals ( url . getProtocol ( ) ) ) { path = URLDecoder . decode ( path , "UTF-8" ) ; return path . replaceAll ( "!.*$" , "" ) ; } else if ( "file" . equals ( url . getProtocol ( ) ) ) { String klassName = klass . getName ( ) ; klassName = klassName . replace ( "." , "/" ) + ".class" ; path = path . substring ( 0 , path . length ( ) - klassName . length ( ) ) ; File baseDir = new File ( path ) ; File testDir = new File ( System . getProperty ( "test.build.dir" , "target/test-dir" ) ) ; testDir = testDir . getAbsoluteFile ( ) ; if ( ! testDir . exists ( ) ) { if ( ! testDir . mkdirs ( ) ) { throw new IOException ( "Unable to create directory :" + testDir . toString ( ) ) ; } } File tempJar = File . createTempFile ( TMP_HADOOP , "" , testDir ) ; tempJar = new File ( tempJar . getAbsolutePath ( ) + ".jar" ) ; createJar ( baseDir , tempJar ) ; return tempJar . getAbsolutePath ( ) ; } } } catch ( IOException e ) { throw new RuntimeException ( e ) ; } } return null ; } | Returns the full path to the Jar containing the class . It always return a JAR . |
34,133 | public String readConfigName ( SolrZkClient zkClient , String collection ) throws KeeperException , InterruptedException { if ( collection == null ) { throw new IllegalArgumentException ( "collection must not be null" ) ; } String configName = null ; byte [ ] aliasData = zkClient . getData ( ZkStateReader . ALIASES , null , null , true ) ; Aliases aliases = ClusterState . load ( aliasData ) ; String alias = aliases . getCollectionAlias ( collection ) ; if ( alias != null ) { List < String > aliasList = StrUtils . splitSmart ( alias , "," , true ) ; if ( aliasList . size ( ) > 1 ) { throw new IllegalArgumentException ( "collection cannot be an alias that maps to multiple collections" ) ; } collection = aliasList . get ( 0 ) ; } String path = ZkStateReader . COLLECTIONS_ZKNODE + "/" + collection ; if ( LOG . isInfoEnabled ( ) ) { LOG . info ( "Load collection config from:" + path ) ; } byte [ ] data = zkClient . getData ( path , null , null , true ) ; if ( data != null ) { ZkNodeProps props = ZkNodeProps . load ( data ) ; configName = props . getStr ( ZkController . CONFIGNAME_PROP ) ; } if ( configName != null && ! zkClient . exists ( ZkConfigManager . CONFIGS_ZKNODE + "/" + configName , true ) ) { LOG . error ( "Specified config does not exist in ZooKeeper:" + configName ) ; throw new IllegalArgumentException ( "Specified config does not exist in ZooKeeper:" + configName ) ; } return configName ; } | Returns config value given collection name Borrowed heavily from Solr s ZKController . |
34,134 | public File downloadConfigDir ( SolrZkClient zkClient , String configName , File dir ) throws IOException , InterruptedException , KeeperException { Preconditions . checkArgument ( dir . exists ( ) ) ; Preconditions . checkArgument ( dir . isDirectory ( ) ) ; ZkConfigManager manager = new ZkConfigManager ( zkClient ) ; manager . downloadConfigDir ( configName , dir . toPath ( ) ) ; File confDir = new File ( dir , "conf" ) ; if ( ! confDir . isDirectory ( ) ) { confDir = new File ( Files . createTempDir ( ) . getAbsolutePath ( ) , "conf" ) ; confDir . getParentFile ( ) . deleteOnExit ( ) ; Files . move ( dir , confDir ) ; dir = confDir . getParentFile ( ) ; } verifyConfigDir ( confDir ) ; return dir ; } | Download and return the config directory from ZK |
34,135 | public static void check ( String namespace , String name , DatasetDescriptor descriptor ) { checkDatasetName ( namespace , name ) ; checkDescriptor ( descriptor ) ; } | Checks the name and descriptor for known compatibility issues and throws an exception if an incompatibility is found . |
34,136 | public static void checkAndWarn ( String namespace , String datasetName , Schema schema ) { try { checkDatasetName ( namespace , datasetName ) ; checkSchema ( schema ) ; } catch ( IllegalArgumentException e ) { LOG . warn ( e . getMessage ( ) ) ; } catch ( IllegalStateException e ) { LOG . warn ( e . getMessage ( ) ) ; } } | Checks the name and schema for known compatibility issues and warns . |
34,137 | public static void checkDatasetName ( String namespace , String name ) { Preconditions . checkNotNull ( namespace , "Namespace cannot be null" ) ; Preconditions . checkNotNull ( name , "Dataset name cannot be null" ) ; ValidationException . check ( Compatibility . isCompatibleName ( namespace ) , "Namespace %s is not alphanumeric (plus '_')" , namespace ) ; ValidationException . check ( Compatibility . isCompatibleName ( name ) , "Dataset name %s is not alphanumeric (plus '_')" , name ) ; } | Precondition - style validation that a dataset name is compatible . |
34,138 | public static void checkDescriptor ( DatasetDescriptor descriptor ) { Preconditions . checkNotNull ( descriptor , "Descriptor cannot be null" ) ; Schema schema = descriptor . getSchema ( ) ; checkSchema ( schema ) ; if ( descriptor . isPartitioned ( ) ) { Preconditions . checkArgument ( schema . getType ( ) == Schema . Type . RECORD , "[BUG] Partitioned datasets must have record schemas" ) ; Set < String > names = Sets . newHashSet ( ) ; for ( Schema . Field field : schema . getFields ( ) ) { names . add ( field . name ( ) ) ; } List < String > incompatible = Lists . newArrayList ( ) ; List < String > duplicates = Lists . newArrayList ( ) ; for ( FieldPartitioner fp : Accessor . getDefault ( ) . getFieldPartitioners ( descriptor . getPartitionStrategy ( ) ) ) { String name = fp . getName ( ) ; if ( ! isCompatibleName ( name ) ) { incompatible . add ( name ) ; } else if ( names . contains ( name ) ) { duplicates . add ( name ) ; } else { names . add ( name ) ; } } ValidationException . check ( incompatible . isEmpty ( ) , "Partition names are not alphanumeric (plus '_'): %s" , Joiner . on ( ", " ) . join ( incompatible ) ) ; ValidationException . check ( duplicates . isEmpty ( ) , "Partition names duplicate data fields: %s" , Joiner . on ( ", " ) . join ( duplicates ) ) ; } } | Precondition - style validation that the DatasetDescriptor is compatible . |
34,139 | private static List < String > getIncompatibleNames ( Schema schema ) { NameValidation validation = new NameValidation ( ) ; SchemaUtil . visit ( schema , validation ) ; return validation . getIncompatibleNames ( ) ; } | Returns a list of field names from the schema that contain characters that are known to be incompatible with some projects such as Hive or HBase . |
34,140 | public Record copy ( ) { ArrayListMultimap < String , Object > copy = ArrayListMultimap . create ( fields . size ( ) + 16 , 10 ) ; copy . putAll ( fields ) ; return new Record ( copy ) ; } | Returns a shallow copy of this record . |
34,141 | public Object getFirstValue ( String key ) { List values = fields . get ( key ) ; return values . size ( ) > 0 ? values . get ( 0 ) : null ; } | Returns the first value associated with the given key or null if no such value exists |
34,142 | public void replaceValues ( String key , Object value ) { List < Object > list = fields . get ( key ) ; list . clear ( ) ; list . add ( value ) ; } | Removes all values that are associated with the given key and then associates the given value with the given key . |
34,143 | public void putIfAbsent ( String key , Object value ) { if ( ! fields . containsEntry ( key , value ) ) { fields . put ( key , value ) ; } } | Adds the given value to the values currently associated with the given key iff the key isn t already associated with that same value . |
34,144 | public static URI build ( String repoUri , String namespace , String dataset ) { return build ( URI . create ( repoUri ) , namespace , dataset ) ; } | Builds a dataset URI from the given repository URI string namespace and dataset name . |
34,145 | public static URI build ( URI repoUri , String namespace , String dataset ) { return new URIBuilder ( repoUri , namespace , dataset ) . build ( ) ; } | Builds a dataset URI from the given repository URI namespace and dataset name . |
34,146 | public static boolean isConsistentWithExpectedType ( Schema . Type type , Class < ? > expectedClass ) { Class < ? > typeClass = TYPE_TO_CLASS . get ( type ) ; return typeClass != null && expectedClass . isAssignableFrom ( typeClass ) ; } | Checks that a schema type should produce an object of the expected class . |
34,147 | public static Schema partitionFieldSchema ( FieldPartitioner < ? , ? > fp , Schema schema ) { if ( fp instanceof IdentityFieldPartitioner ) { return fieldSchema ( schema , fp . getSourceName ( ) ) ; } else { Class < ? > fieldType = getPartitionType ( fp , schema ) ; if ( fieldType == Integer . class ) { return Schema . create ( Schema . Type . INT ) ; } else if ( fieldType == Long . class ) { return Schema . create ( Schema . Type . LONG ) ; } else if ( fieldType == String . class ) { return Schema . create ( Schema . Type . STRING ) ; } else { throw new ValidationException ( "Cannot encode partition " + fp . getName ( ) + " with type " + fp . getSourceType ( ) ) ; } } } | Builds a Schema for the FieldPartitioner using the given Schema to determine types not fixed by the FieldPartitioner . |
34,148 | private static Schema . Field partitionField ( FieldPartitioner < ? , ? > fp , Schema schema ) { return new Schema . Field ( fp . getName ( ) , partitionFieldSchema ( fp , schema ) , null , null ) ; } | Builds a Schema . Field for the FieldPartitioner using the Schema to determine types not fixed by the FieldPartitioner . |
34,149 | public static Set < MediaType > set ( MediaType ... types ) { Set < MediaType > set = new HashSet < MediaType > ( ) ; for ( MediaType type : types ) { if ( type != null ) { set . add ( type ) ; } } return Collections . unmodifiableSet ( set ) ; } | Convenience method that returns an unmodifiable set that contains all the given media types . |
34,150 | public static Set < MediaType > set ( String ... types ) { Set < MediaType > set = new HashSet < MediaType > ( ) ; for ( String type : types ) { MediaType mt = parse ( type ) ; if ( mt != null ) { set . add ( mt ) ; } } return Collections . unmodifiableSet ( set ) ; } | Convenience method that parses the given media type strings and returns an unmodifiable set that contains all the parsed types . |
34,151 | private static String unquote ( String s ) { while ( s . startsWith ( "\"" ) || s . startsWith ( "'" ) ) { s = s . substring ( 1 ) ; } while ( s . endsWith ( "\"" ) || s . endsWith ( "'" ) ) { s = s . substring ( 0 , s . length ( ) - 1 ) ; } return s ; } | Fuzzy unquoting mechanism that works also with somewhat malformed quotes . |
34,152 | public static < T > T readAvroEntity ( byte [ ] bytes , DatumReader < T > reader ) { Decoder decoder = new DecoderFactory ( ) . binaryDecoder ( bytes , null ) ; return AvroUtils . < T > readAvroEntity ( decoder , reader ) ; } | Given a byte array and a DatumReader decode an avro entity from the byte array . Decodes using the avro BinaryDecoder . Return the constructed entity . |
34,153 | public static < T > T readAvroEntity ( Decoder decoder , DatumReader < T > reader ) { try { return reader . read ( null , decoder ) ; } catch ( IOException e ) { throw new SerializationException ( "Could not deserialize Avro entity" , e ) ; } } | Decode an entity from the initialized Avro Decoder using the DatumReader . |
34,154 | public static < T > byte [ ] writeAvroEntity ( T entity , DatumWriter < T > writer ) { ByteArrayOutputStream outputStream = new ByteArrayOutputStream ( ) ; Encoder encoder = new EncoderFactory ( ) . binaryEncoder ( outputStream , null ) ; writeAvroEntity ( entity , encoder , writer ) ; return outputStream . toByteArray ( ) ; } | Given an entity and a DatumReader encode the avro entity to a byte array . Encodes using the avro BinaryEncoder . Return the serialized bytes . |
34,155 | public static < T > void writeAvroEntity ( T entity , Encoder encoder , DatumWriter < T > writer ) { try { writer . write ( entity , encoder ) ; encoder . flush ( ) ; } catch ( IOException e ) { throw new SerializationException ( "Could not serialize Avro entity" , e ) ; } } | Given an entity an avro schema and an encoder write the entity to the encoder s underlying output stream . |
34,156 | public static Field cloneField ( Field field ) { return new Field ( field . name ( ) , field . schema ( ) , field . doc ( ) , field . defaultValue ( ) ) ; } | Given an avro Schema . Field instance make a clone of it . |
34,157 | public static String inputStreamToString ( InputStream in ) { final int BUFFER_SIZE = 1024 ; BufferedReader bufferedReader ; try { bufferedReader = new BufferedReader ( new InputStreamReader ( in , "UTF-8" ) ) ; } catch ( UnsupportedEncodingException e ) { throw new DatasetException ( "Platform doesn't support UTF-8. It must!" , e ) ; } char [ ] buffer = new char [ BUFFER_SIZE ] ; StringBuilder stringBuilder = new StringBuilder ( BUFFER_SIZE ) ; int bytesRead = 0 ; try { while ( ( bytesRead = bufferedReader . read ( buffer , 0 , BUFFER_SIZE ) ) > 0 ) { stringBuilder . append ( buffer , 0 , bytesRead ) ; } } catch ( IOException e ) { throw new DatasetException ( "Error reading from input stream" , e ) ; } return stringBuilder . toString ( ) ; } | Convert an InputStream to a string encoded as UTF - 8 . |
/**
 * Builds a map of field name to default value for every field of the record
 * schema that declares a default.
 *
 * Technique: the default-bearing fields are copied into a synthetic record
 * schema, then an *empty* record is serialized and re-read with the
 * synthetic schema as the reader schema. Avro's schema resolution fills in
 * the declared defaults during the read, which yields the defaults as
 * runtime objects rather than JSON nodes.
 *
 * @param avroRecordSchema a record schema
 * @return map of field name to materialized default value
 */
public static Map<String, Object> getDefaultValueMap(Schema avroRecordSchema) {
  // collect copies of only the fields that declare a default value
  List<Field> defaultFields = new ArrayList<Field>();
  for (Field f : avroRecordSchema.getFields()) {
    if (f.defaultValue() != null) {
      // copy the field; Avro fields can't be reused across schemas
      defaultFields.add(new Field(f.name(), f.schema(), f.doc(), f.defaultValue(), f.order()));
    }
  }
  Schema defaultSchema = Schema.createRecord(defaultFields);
  Schema emptyRecordSchema = Schema.createRecord(new ArrayList<Field>());
  DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(emptyRecordSchema);
  // reader resolves empty writer schema -> default schema, applying defaults
  DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(emptyRecordSchema, defaultSchema);
  GenericRecord emptyRecord = new GenericData.Record(emptyRecordSchema);
  GenericRecord defaultRecord =
      AvroUtils.readAvroEntity(AvroUtils.writeAvroEntity(emptyRecord, writer), reader);
  Map<String, Object> defaultValueMap = new HashMap<String, Object>();
  for (Field f : defaultFields) {
    defaultValueMap.put(f.name(), defaultRecord.get(f.name()));
  }
  return defaultValueMap;
}
34,159 | public Map < String , byte [ ] > compile ( String fileName , String source , Writer err , String sourcePath , String classPath ) { DiagnosticCollector < JavaFileObject > diagnostics = new DiagnosticCollector < JavaFileObject > ( ) ; MemoryJavaFileManager manager = new MemoryJavaFileManager ( stdManager ) ; List < JavaFileObject > compUnits = new ArrayList < JavaFileObject > ( 1 ) ; compUnits . add ( manager . makeStringSource ( fileName , source ) ) ; List < String > options = new ArrayList < String > ( ) ; options . add ( "-Xlint:all" ) ; options . add ( "-deprecation" ) ; if ( sourcePath != null ) { options . add ( "-sourcepath" ) ; options . add ( sourcePath ) ; } if ( classPath != null ) { options . add ( "-classpath" ) ; options . add ( classPath ) ; } javax . tools . JavaCompiler . CompilationTask task = tool . getTask ( err , manager , diagnostics , options , null , compUnits ) ; if ( task . call ( ) == false ) { PrintWriter perr = new PrintWriter ( err ) ; for ( Diagnostic diagnostic : diagnostics . getDiagnostics ( ) ) { perr . println ( diagnostic . getMessage ( null ) ) ; } perr . flush ( ) ; return null ; } Map < String , byte [ ] > classBytes = manager . getClassBytes ( ) ; try { manager . close ( ) ; } catch ( IOException exp ) { } return classBytes ; } | compile given String source and return bytecodes as a Map . |
34,160 | public static < E > GenericData getDataModelForType ( Class < E > type ) { if ( SpecificRecord . class . isAssignableFrom ( type ) ) { return new SpecificData ( type . getClassLoader ( ) ) ; } else if ( IndexedRecord . class . isAssignableFrom ( type ) ) { return GenericData . get ( ) ; } else { return AllowNulls . get ( ) ; } } | Get the data model for the given type . |
34,161 | @ SuppressWarnings ( "unchecked" ) public static < E > DatumReader < E > getDatumReaderForType ( Class < E > type , Schema writerSchema ) { Schema readerSchema = getReaderSchema ( type , writerSchema ) ; GenericData dataModel = getDataModelForType ( type ) ; if ( dataModel instanceof ReflectData ) { return new ReflectDatumReader < E > ( writerSchema , readerSchema , ( ReflectData ) dataModel ) ; } else if ( dataModel instanceof SpecificData ) { return new SpecificDatumReader < E > ( writerSchema , readerSchema , ( SpecificData ) dataModel ) ; } else { return new GenericDatumReader < E > ( writerSchema , readerSchema , dataModel ) ; } } | Get the DatumReader for the given type . |
34,162 | public static < E > Schema getReaderSchema ( Class < E > type , Schema schema ) { Schema readerSchema = schema ; GenericData dataModel = getDataModelForType ( type ) ; if ( dataModel instanceof SpecificData ) { readerSchema = ( ( SpecificData ) dataModel ) . getSchema ( type ) ; } return readerSchema ; } | Get the reader schema based on the given type and writer schema . |
34,163 | public static < E > Schema getWriterSchema ( Class < E > type , Schema schema ) { Schema writerSchema = schema ; GenericData dataModel = getDataModelForType ( type ) ; if ( dataModel instanceof AllowNulls ) { dataModel = ReflectData . get ( ) ; } if ( dataModel instanceof SpecificData ) { writerSchema = ( ( SpecificData ) dataModel ) . getSchema ( type ) ; } return writerSchema ; } | Get the writer schema based on the given type and dataset schema . |
34,164 | @ SuppressWarnings ( "unchecked" ) public static < E > E createRecord ( Class < E > type , Schema schema ) { if ( isGeneric ( type ) && ! type . isInterface ( ) ) { if ( GenericData . Record . class . equals ( type ) ) { return ( E ) GenericData . get ( ) . newRecord ( null , schema ) ; } return ( E ) ReflectData . newInstance ( type , schema ) ; } return null ; } | If E implements GenericRecord but does not implement SpecificRecord then create a new instance of E using reflection so that GenericDataumReader will use the expected type . |
34,165 | String getHiveMetastoreUri ( Configuration conf ) { String metastoreUris = conf . get ( Loader . HIVE_METASTORE_URI_PROP ) ; if ( metastoreUris == null ) { return null ; } String [ ] uriArray = metastoreUris . split ( HIVE_METASTORE_URIS_SEPARATOR ) ; return uriArray [ 0 ] ; } | This method extracts one URI for the Hive metastore . The hive . metastore . uris property in the parameter Configuration object can contain a list of uris but since Kite does not support highly available Hive metastore currently we need to make sure that only the first one is retrieved . |
34,166 | public boolean isInAlphabet ( final byte [ ] arrayOctet , final boolean allowWSPad ) { for ( int i = 0 ; i < arrayOctet . length ; i ++ ) { if ( ! isInAlphabet ( arrayOctet [ i ] ) && ( ! allowWSPad || ( arrayOctet [ i ] != PAD ) && ! isWhiteSpace ( arrayOctet [ i ] ) ) ) { return false ; } } return true ; } | Tests a given byte array to see if it contains only valid characters within the alphabet . The method optionally treats whitespace and pad as valid . |
34,167 | protected boolean containsAlphabetOrPad ( final byte [ ] arrayOctet ) { if ( arrayOctet == null ) { return false ; } for ( final byte element : arrayOctet ) { if ( PAD == element || isInAlphabet ( element ) ) { return true ; } } return false ; } | Tests a given byte array to see if it contains any characters within the alphabet or PAD . |
34,168 | public long getEncodedLength ( final byte [ ] pArray ) { long len = ( ( pArray . length + unencodedBlockSize - 1 ) / unencodedBlockSize ) * ( long ) encodedBlockSize ; if ( lineLength > 0 ) { len += ( ( len + lineLength - 1 ) / lineLength ) * chunkSeparatorLength ; } return len ; } | Calculates the amount of space needed to encode the supplied array . |
/**
 * Serializes one entity field into a PutAction according to its mapping
 * type: COLUMN/COUNTER write a single cell, KEY_AS_COLUMN fans the value
 * out across qualifiers, and OCC_VERSION records a version-check action.
 *
 * @param keyBytes the serialized row key
 * @param fieldMapping mapping metadata for the field
 * @param fieldValue the field's value
 * @return a PutAction holding the Put (and version check, for OCC fields)
 * @throws ValidationException for unrecognized mapping types
 */
public PutAction serialize(byte[] keyBytes, FieldMapping fieldMapping, Object fieldValue) {
  Put put = new Put(keyBytes);
  PutAction putAction = new PutAction(put);
  String fieldName = fieldMapping.getFieldName();
  if (fieldMapping.getMappingType() == MappingType.COLUMN
      || fieldMapping.getMappingType() == MappingType.COUNTER) {
    // single-cell mapping: one family/qualifier cell
    serializeColumn(fieldName, fieldMapping.getFamily(), fieldMapping.getQualifier(), fieldValue, put);
  } else if (fieldMapping.getMappingType() == MappingType.KEY_AS_COLUMN) {
    // map-like field: each sub-key becomes its own qualifier
    serializeKeyAsColumn(fieldName, fieldMapping.getFamily(), fieldMapping.getPrefix(), fieldValue, put);
  } else if (fieldMapping.getMappingType() == MappingType.OCC_VERSION) {
    // optimistic-concurrency version column, with its check action
    serializeOCCColumn(fieldValue, putAction);
  } else {
    throw new ValidationException("Invalid field mapping for field with name: " + fieldMapping.getFieldName());
  }
  return putAction;
}
/**
 * Deserializes one entity field from an HBase Result, dispatching on the
 * field's mapping type (mirror of {@code serialize}).
 *
 * @param fieldMapping mapping metadata for the field
 * @param result the HBase row to read from
 * @return the field value, or the mapping-specific default/null
 * @throws ValidationException for unrecognized mapping types
 */
public Object deserialize(FieldMapping fieldMapping, Result result) {
  String fieldName = fieldMapping.getFieldName();
  MappingType mappingType = fieldMapping.getMappingType();
  if (mappingType == MappingType.COLUMN || mappingType == MappingType.COUNTER) {
    return deserializeColumn(fieldMapping.getFieldName(), fieldMapping.getFamily(), fieldMapping.getQualifier(), result);
  } else if (mappingType == MappingType.KEY_AS_COLUMN) {
    return deserializeKeyAsColumn(fieldMapping.getFieldName(), fieldMapping.getFamily(), fieldMapping.getPrefix(), result);
  } else if (mappingType == MappingType.OCC_VERSION) {
    return deserializeOCCColumn(result);
  } else {
    throw new ValidationException("Invalid field mapping for field with name: " + fieldName);
  }
}
34,171 | private void serializeColumn ( String fieldName , byte [ ] family , byte [ ] qualifier , Object fieldValue , Put put ) { byte [ ] bytes = serializeColumnValueToBytes ( fieldName , fieldValue ) ; put . add ( family , qualifier , bytes ) ; } | Serialize the column value and update the Put with the serialized bytes . |
/**
 * Serializes a keyAsColumn field: each sub-key/value of the field becomes
 * one cell, with the optional prefix prepended to the serialized sub-key to
 * form the qualifier.
 *
 * NOTE(review): prefix.getBytes() uses the platform-default charset here
 * (and in deserializeKeyAsColumn). This round-trips only while both sides
 * run with the same default; confirm whether an explicit charset is needed.
 */
private void serializeKeyAsColumn(String fieldName, byte[] family, String prefix, Object fieldValue, Put put) {
  // explode the field into its sub-key -> sub-value entries
  Map<CharSequence, Object> keyAsColumnValues = entityComposer.extractKeyAsColumnValues(fieldName, fieldValue);
  for (Entry<CharSequence, Object> entry : keyAsColumnValues.entrySet()) {
    CharSequence qualifier = entry.getKey();
    byte[] qualifierBytes;
    byte[] columnKeyBytes = serializeKeyAsColumnKeyToBytes(fieldName, qualifier);
    if (prefix != null) {
      // qualifier = prefix bytes followed by the serialized sub-key
      byte[] prefixBytes = prefix.getBytes();
      qualifierBytes = new byte[prefixBytes.length + columnKeyBytes.length];
      System.arraycopy(prefixBytes, 0, qualifierBytes, 0, prefixBytes.length);
      System.arraycopy(columnKeyBytes, 0, qualifierBytes, prefixBytes.length, columnKeyBytes.length);
    } else {
      qualifierBytes = columnKeyBytes;
    }
    byte[] bytes = serializeKeyAsColumnValueToBytes(fieldName, qualifier, entry.getValue());
    put.add(family, qualifierBytes, bytes);
  }
}
34,173 | private void serializeOCCColumn ( Object fieldValue , PutAction putAction ) { Long currVersion = ( Long ) fieldValue ; VersionCheckAction versionCheckAction = new VersionCheckAction ( currVersion ) ; putAction . getPut ( ) . add ( Constants . SYS_COL_FAMILY , Constants . VERSION_CHECK_COL_QUALIFIER , Bytes . toBytes ( currVersion + 1 ) ) ; putAction . setVersionCheckAction ( versionCheckAction ) ; } | Serialize the OCC column value and update the putAction with the serialized bytes . |
34,174 | private Object deserializeColumn ( String fieldName , byte [ ] family , byte [ ] qualifier , Result result ) { byte [ ] bytes = result . getValue ( family , qualifier ) ; if ( bytes == null ) { return getDefaultValue ( fieldName ) ; } else { return deserializeColumnValueFromBytes ( fieldName , bytes ) ; } } | Deserialize the entity field that has a column mapping . |
/**
 * Rebuilds a keyAsColumn field from the Result: every cell in the family is
 * turned into a sub-key/value pair, stripping the configured prefix from
 * qualifiers that carry it.
 *
 * NOTE(review): prefix.getBytes() uses the platform-default charset here
 * (and in serializeKeyAsColumn); confirm whether an explicit charset is
 * needed. Also note cells whose qualifier does NOT start with the prefix
 * are still included, with their full qualifier — confirm that is intended.
 */
private Object deserializeKeyAsColumn(String fieldName, byte[] family, String prefix, Result result) {
  byte[] prefixBytes = prefix != null ? prefix.getBytes() : null;
  Map<CharSequence, Object> fieldValueAsMap = new HashMap<CharSequence, Object>();
  Map<byte[], byte[]> familyMap = result.getFamilyMap(family);
  for (Map.Entry<byte[], byte[]> entry : familyMap.entrySet()) {
    byte[] qualifier = entry.getKey();
    // strip the prefix when the qualifier is longer than it and starts with it
    if (prefixBytes != null && qualifier.length > prefixBytes.length
        && Arrays.equals(Arrays.copyOf(qualifier, prefixBytes.length), prefixBytes)) {
      qualifier = Arrays.copyOfRange(qualifier, prefixBytes.length, qualifier.length);
    }
    byte[] columnBytes = entry.getValue();
    CharSequence keyAsColumnKey = deserializeKeyAsColumnKeyFromBytes(fieldName, qualifier);
    Object keyAsColumnValue = deserializeKeyAsColumnValueFromBytes(fieldName, qualifier, columnBytes);
    fieldValueAsMap.put(keyAsColumnKey, keyAsColumnValue);
  }
  // let the composer assemble the map back into the field's runtime type
  return entityComposer.buildKeyAsColumnField(fieldName, fieldValueAsMap);
}
34,176 | private Object deserializeOCCColumn ( Result result ) { byte [ ] versionBytes = result . getValue ( Constants . SYS_COL_FAMILY , Constants . VERSION_CHECK_COL_QUALIFIER ) ; if ( versionBytes == null ) { return null ; } else { return Bytes . toLong ( versionBytes ) ; } } | Deserialize the OCC column value from the Result . |
34,177 | private static boolean mappingCompatible ( EntitySchema oldSchema , EntitySchema newSchema ) { for ( FieldMapping oldFieldMapping : oldSchema . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { FieldMapping newFieldMapping = newSchema . getColumnMappingDescriptor ( ) . getFieldMapping ( oldFieldMapping . getFieldName ( ) ) ; if ( newFieldMapping != null ) { if ( ! oldFieldMapping . equals ( newFieldMapping ) ) { return false ; } } } return true ; } | Ensure that the column mappings for the shared fields between the old and new schema haven t changed . |
34,178 | public Command compile ( File morphlineFile , String morphlineId , MorphlineContext morphlineContext , Command finalChild , Config ... overrides ) { Config config ; try { config = parse ( morphlineFile , overrides ) ; } catch ( IOException e ) { throw new MorphlineCompilationException ( "Cannot parse morphline file: " + morphlineFile , null , e ) ; } Config morphlineConfig = find ( morphlineId , config , morphlineFile . getPath ( ) ) ; Command morphlineCommand = compile ( morphlineConfig , morphlineContext , finalChild ) ; return morphlineCommand ; } | Parses the given morphlineFile then finds the morphline with the given morphlineId within then compiles the morphline and returns the corresponding morphline command . The returned command will feed records into finalChild . |
34,179 | public Config parse ( File file , Config ... overrides ) throws IOException { if ( file == null || file . getPath ( ) . trim ( ) . length ( ) == 0 ) { throw new MorphlineCompilationException ( "Missing morphlineFile parameter" , null ) ; } if ( ! file . exists ( ) ) { throw new FileNotFoundException ( "File not found: " + file ) ; } if ( ! file . canRead ( ) ) { throw new IOException ( "Insufficient permissions to read file: " + file ) ; } Config config = ConfigFactory . parseFile ( file ) ; for ( Config override : overrides ) { config = override . withFallback ( config ) ; } synchronized ( LOCK ) { ConfigFactory . invalidateCaches ( ) ; config = ConfigFactory . load ( config ) ; config . checkValid ( ConfigFactory . defaultReference ( ) ) ; } return config ; } | Loads the given config file from the local file system |
/**
 * Finds the morphline with the given id inside the parsed config. A null or
 * blank id selects the first morphline; otherwise the "morphlines" list is
 * searched for an entry whose "id" matches.
 *
 * @param morphlineId id to look up; null/blank means "first morphline"
 * @param config parsed morphline config containing a "morphlines" list
 * @param nameForErrorMsg file name used only in error messages
 * @throws MorphlineCompilationException if the list is empty or the id is
 *         not found
 */
public Config find(String morphlineId, Config config, String nameForErrorMsg) {
  List<? extends Config> morphlineConfigs = config.getConfigList("morphlines");
  if (morphlineConfigs.size() == 0) {
    throw new MorphlineCompilationException("Morphline file must contain at least one morphline: " + nameForErrorMsg, null);
  }
  // normalize: trim, then treat an empty id the same as no id at all
  if (morphlineId != null) {
    morphlineId = morphlineId.trim();
  }
  if (morphlineId != null && morphlineId.length() == 0) {
    morphlineId = null;
  }
  Config morphlineConfig = null;
  if (morphlineId == null) {
    // no id requested: default to the first morphline in the file
    morphlineConfig = morphlineConfigs.get(0);
    Preconditions.checkNotNull(morphlineConfig);
  } else {
    for (Config candidate : morphlineConfigs) {
      if (morphlineId.equals(new Configs().getString(candidate, "id", null))) {
        morphlineConfig = candidate;
        break;
      }
    }
    if (morphlineConfig == null) {
      throw new MorphlineCompilationException("Morphline id '" + morphlineId + "' not found in morphline file: " + nameForErrorMsg, null);
    }
  }
  return morphlineConfig;
}
34,181 | public void start ( ) throws IOException , InterruptedException { for ( Service service : services ) { service . configure ( serviceConfig ) ; logger . info ( "Running Minicluster Service: " + service . getClass ( ) . getName ( ) ) ; service . start ( ) ; serviceConfig . setHadoopConf ( service . getHadoopConf ( ) ) ; DefaultConfiguration . set ( serviceConfig . getHadoopConf ( ) ) ; } logger . info ( "All Minicluster Services running." ) ; } | Starts the services in order passing the previous service s modified Configuration object to the next . |
34,182 | public void stop ( ) throws IOException , InterruptedException { for ( int i = services . size ( ) - 1 ; i >= 0 ; i -- ) { Service service = services . get ( i ) ; logger . info ( "Stopping Minicluster Service: " + service . getClass ( ) . getName ( ) ) ; service . stop ( ) ; } logger . info ( "All Minicluster Services stopped." ) ; } | Stops the services in reverse of their run order . |
34,183 | public void refreshManagedSchemaCache ( String tableName , String entityName ) { ManagedSchema managedSchema = managedSchemaDao . getManagedSchema ( tableName , entityName ) ; if ( managedSchema != null ) { getManagedSchemaMap ( ) . put ( getManagedSchemaMapKey ( managedSchema . getTable ( ) , managedSchema . getName ( ) ) , managedSchema ) ; } } | Update the managedSchemaMap for the entry defined by tableName and entityName . |
/**
 * Returns the managed-schema cache, lazily creating and populating it on
 * first use. All access to the map must go through this method.
 *
 * NOTE(review): this is double-checked locking; it is only safe if the
 * managedSchemaMap field is declared volatile — the field declaration is
 * not visible here, so confirm it.
 */
private ConcurrentHashMap<String, ManagedSchema> getManagedSchemaMap() {
  if (managedSchemaMap == null) {
    synchronized (this) {
      // re-check under the lock so only one thread initializes
      if (managedSchemaMap == null) {
        managedSchemaMap = new ConcurrentHashMap<String, ManagedSchema>();
        populateManagedSchemaMap();
      }
    }
  }
  return managedSchemaMap;
}
34,185 | private void populateManagedSchemaMap ( ) { Collection < ManagedSchema > schemas = managedSchemaDao . getManagedSchemas ( ) ; for ( ManagedSchema managedSchema : schemas ) { getManagedSchemaMap ( ) . put ( getManagedSchemaMapKey ( managedSchema . getTable ( ) , managedSchema . getName ( ) ) , managedSchema ) ; } } | Populate the managedSchemaMap with all of the managed schemas returned by the managedSchemaDao . |
34,186 | @ SuppressWarnings ( "unchecked" ) private KeyEntitySchemaParser < ? , ? > getSchemaParser ( String schemaParserClassName ) { if ( schemaParsers . contains ( schemaParserClassName ) ) { return schemaParsers . get ( schemaParserClassName ) ; } else { try { Class < KeyEntitySchemaParser < ? , ? > > schemaParserClass = ( Class < KeyEntitySchemaParser < ? , ? > > ) Class . forName ( schemaParserClassName ) ; KeyEntitySchemaParser < ? , ? > schemaParser = schemaParserClass . getConstructor ( ) . newInstance ( ) ; schemaParsers . putIfAbsent ( schemaParserClassName , schemaParser ) ; return schemaParser ; } catch ( Exception e ) { throw new DatasetException ( "Could not instantiate schema parser class: " + schemaParserClassName , e ) ; } } } | Get the schema parser by its classname . This method will cache the constructed schema parsers . |
34,187 | private Map < Integer , String > getManagedSchemaVersions ( String tableName , String entityName ) { ManagedSchema managedSchema = getManagedSchema ( tableName , entityName ) ; Map < Integer , String > returnMap = new HashMap < Integer , String > ( ) ; for ( Map . Entry < String , String > versionsEntry : managedSchema . getEntitySchemas ( ) . entrySet ( ) ) { returnMap . put ( Integer . parseInt ( versionsEntry . getKey ( ) ) , versionsEntry . getValue ( ) ) ; } return returnMap ; } | Get a map of schema versions for a managed schemas . |
34,188 | private ManagedSchema getManagedSchema ( String tableName , String entityName ) { ManagedSchema managedSchema = getManagedSchemaFromSchemaMap ( tableName , entityName ) ; if ( managedSchema == null ) { refreshManagedSchemaCache ( tableName , entityName ) ; managedSchema = getManagedSchemaFromSchemaMap ( tableName , entityName ) ; if ( managedSchema == null ) { String msg = "Could not find managed schemas for " + tableName + ", " + entityName ; throw new SchemaNotFoundException ( msg ) ; } } return managedSchema ; } | Get the ManagedSchema entity for the tableName entityName managed schema . |
/**
 * Validates that a new (key schema, entity schema) pair is compatible with
 * every other entity schema already registered for the table: the key
 * schemas must be compatible, column mappings must not overlap, and at most
 * one schema per table may carry an OCC version mapping.
 *
 * @throws IncompatibleSchemaException on the first incompatibility found
 */
private void validateCompatibleWithTableSchemas(String tableName, KeySchema keySchema, EntitySchema entitySchema) {
  // Collect the managed schemas for this table. The cache key presumably
  // has the form "<table>:<entity>" — the prefix match below relies on it.
  List<ManagedSchema> entitiesForTable = new ArrayList<ManagedSchema>();
  for (Entry<String, ManagedSchema> entry : getManagedSchemaMap().entrySet()) {
    if (entry.getKey().startsWith(tableName + ":")) {
      entitiesForTable.add(entry.getValue());
    }
  }
  for (ManagedSchema managedSchema : entitiesForTable) {
    // skip the entity being (re)registered; only compare against others
    if (!managedSchema.getName().equals(entitySchema.getName())) {
      KeyEntitySchemaParser<?, ?> parser = getSchemaParser(managedSchema.getSchemaType());
      // every stored version of the other entity must remain compatible
      for (String schema : managedSchema.getEntitySchemas().values()) {
        EntitySchema otherEntitySchema = parser.parseEntitySchema(schema);
        KeySchema otherKeySchema = parser.parseKeySchema(schema);
        if (!keySchema.compatible(otherKeySchema)) {
          String msg = "StorageKey fields of schema not compatible with other schema for the table. "
              + "Table: " + tableName + ". Other schema: " + otherEntitySchema.getRawSchema()
              + " New schema: " + entitySchema.getRawSchema();
          throw new IncompatibleSchemaException(msg);
        }
        if (!validateCompatibleWithTableColumns(entitySchema, otherEntitySchema)) {
          String msg = "Column mappings of schema not compatible with other schema for the table. "
              + "Table: " + tableName + ". Other schema: " + otherEntitySchema.getRawSchema()
              + " New schema: " + entitySchema.getRawSchema();
          throw new IncompatibleSchemaException(msg);
        }
        if (!validateCompatibleWithTableOccVersion(entitySchema, otherEntitySchema)) {
          String msg = "OCCVersion mapping of schema not compatible with other schema for the table. "
              + "Only one schema in the table can have one."
              + "Table: " + tableName + ". Other schema: " + otherEntitySchema.getRawSchema()
              + " New schema: " + entitySchema.getRawSchema();
          throw new IncompatibleSchemaException(msg);
        }
      }
    }
  }
}
/**
 * Checks that two entity schemas for the same table do not overlap in the
 * HBase columns they map to. A conflict exists when a column of one schema
 * equals a column of the other, or when a column falls under the other
 * schema's keyAsColumn (prefix) region — tested by string prefix on the
 * value returned by getColumnValue.
 *
 * @return true when the schemas are column-disjoint; logs and returns
 *         false on the first conflict
 */
private boolean validateCompatibleWithTableColumns(EntitySchema entitySchema1, EntitySchema entitySchema2) {
  // index schema1's plain columns and its keyAsColumn prefixes
  Set<String> entitySchema1Columns = new HashSet<String>();
  List<String> entitySchema1KeyAsColumns = new ArrayList<String>();
  for (FieldMapping fieldMapping1 : entitySchema1.getColumnMappingDescriptor().getFieldMappings()) {
    if (fieldMapping1.getMappingType() == MappingType.COLUMN
        || fieldMapping1.getMappingType() == MappingType.COUNTER) {
      entitySchema1Columns.add(getColumnValue(fieldMapping1));
    } else if (fieldMapping1.getMappingType() == MappingType.KEY_AS_COLUMN) {
      entitySchema1KeyAsColumns.add(getColumnValue(fieldMapping1));
    }
  }
  for (FieldMapping fieldMapping2 : entitySchema2.getColumnMappingDescriptor().getFieldMappings()) {
    if (fieldMapping2.getMappingType() == MappingType.COLUMN
        || fieldMapping2.getMappingType() == MappingType.COUNTER) {
      String value = getColumnValue(fieldMapping2);
      // exact collision with one of schema1's plain columns
      if (entitySchema1Columns.contains(value)) {
        LOG.warn("Field: " + fieldMapping2.getFieldName()
            + " has a table column conflict with a column mapped field in "
            + entitySchema1.getName());
        return false;
      }
      // column lands inside one of schema1's keyAsColumn regions
      for (String keyAsColumn : entitySchema1KeyAsColumns) {
        if (value.startsWith(keyAsColumn)) {
          LOG.warn("Field: " + fieldMapping2.getFieldName()
              + " has a table column conflict with a keyAsColumn mapped field in "
              + entitySchema1.getName());
          return false;
        }
      }
    } else if (fieldMapping2.getMappingType() == MappingType.KEY_AS_COLUMN) {
      String entitySchema2KeyAsColumn = getColumnValue(fieldMapping2);
      // overlapping keyAsColumn regions between the two schemas
      for (String entitySchema1KeyAsColumn : entitySchema1KeyAsColumns) {
        if (entitySchema1KeyAsColumn.startsWith(entitySchema2KeyAsColumn)) {
          LOG.warn("Field " + fieldMapping2.getFieldName()
              + " has a table keyAsColumn conflict with a keyAsColumn mapped field in "
              + entitySchema1.getName());
          return false;
        }
      }
      // schema1 plain column falling inside schema2's keyAsColumn region
      for (String entitySchema1Column : entitySchema1Columns) {
        if (entitySchema1Column.startsWith(entitySchema2KeyAsColumn)) {
          LOG.warn("Field " + fieldMapping2.getFieldName()
              + " has a table keyAsColumn conflict with a column mapped field in "
              + entitySchema1.getName());
          return false;
        }
      }
    }
  }
  return true;
}
34,191 | private boolean validateCompatibleWithTableOccVersion ( EntitySchema entitySchema1 , EntitySchema entitySchema2 ) { boolean foundOccMapping = false ; for ( FieldMapping fieldMapping : entitySchema1 . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { if ( fieldMapping . getMappingType ( ) == MappingType . OCC_VERSION ) { foundOccMapping = true ; break ; } } if ( foundOccMapping ) { for ( FieldMapping fieldMapping : entitySchema2 . getColumnMappingDescriptor ( ) . getFieldMappings ( ) ) { if ( fieldMapping . getMappingType ( ) == MappingType . OCC_VERSION ) { LOG . warn ( "Field: " + fieldMapping . getFieldName ( ) + " in schema " + entitySchema2 . getName ( ) + " conflicts with an occVersion field in " + entitySchema1 . getName ( ) ) ; return false ; } } } return true ; } | Only one schema for a table should contain an OCCVersion field mapping . This method will compare two schemas and return true if only one has an OCC_VERSION field . |
34,192 | public int readInt ( ) throws IOException { byte [ ] intBytes = new byte [ 4 ] ; int i = in . read ( intBytes ) ; if ( i < 4 ) { throw new EOFException ( ) ; } intBytes [ 0 ] = ( byte ) ( intBytes [ 0 ] ^ 0x80 ) ; int value = 0 ; for ( int j = 0 ; j < intBytes . length ; ++ j ) { value = ( value << 8 ) + ( intBytes [ j ] & 0xff ) ; } return value ; } | A int was written by flipping the sign bit and writing it as a big endian int . |
34,193 | public long readLong ( ) throws IOException { byte [ ] longBytes = new byte [ 8 ] ; int i = in . read ( longBytes ) ; if ( i < 8 ) { throw new EOFException ( ) ; } longBytes [ 0 ] = ( byte ) ( longBytes [ 0 ] ^ 0x80 ) ; long value = 0 ; for ( int j = 0 ; j < longBytes . length ; ++ j ) { value = ( value << 8 ) + ( longBytes [ j ] & 0xff ) ; } return value ; } | A long was written by flipping the sign bit and writing it as a big endian long . |
34,194 | public Utf8 readString ( Utf8 old ) throws IOException { ByteBuffer stringBytes = readBytes ( null ) ; return new Utf8 ( stringBytes . array ( ) ) ; } | A string is decoded by reading the string as bytes using the readBytes function . |
34,195 | public ByteBuffer readBytes ( ByteBuffer old ) throws IOException { ByteArrayOutputStream bytesOut = new ByteArrayOutputStream ( ) ; while ( true ) { int byteRead = in . read ( ) ; if ( byteRead < 0 ) { throw new EOFException ( ) ; } if ( byteRead == 0 ) { int secondByteRead = in . read ( ) ; if ( byteRead < 0 ) { throw new EOFException ( ) ; } if ( secondByteRead == 0 ) { break ; } else if ( secondByteRead == 1 ) { bytesOut . write ( 0 ) ; } else { String msg = "Illegal encoding. 0 byte cannot be followed by " + "anything other than 0 or 1. It was followed by " + Integer . toString ( byteRead ) ; throw new IOException ( msg ) ; } } else { bytesOut . write ( byteRead ) ; } } return ByteBuffer . wrap ( bytesOut . toByteArray ( ) ) ; } | Bytes are decoded by reading each byte until we find two consecutive 0 bytes . A 0 byte followed by a 1 byte is translated into a 0 byte . |
34,196 | public void readFixed ( byte [ ] bytes , int start , int length ) throws IOException { int i = in . read ( bytes , start , length ) ; if ( i < length ) { throw new EOFException ( ) ; } } | A fixed is decoded by just reading length bytes and placing the bytes read into the bytes array starting at index start . |
34,197 | public static < T > T runPrivileged ( UserGroupInformation login , PrivilegedExceptionAction < T > action ) { try { if ( login == null ) { return action . run ( ) ; } else { return login . doAs ( action ) ; } } catch ( IOException ex ) { throw new DatasetIOException ( "Privileged action failed" , ex ) ; } catch ( InterruptedException ex ) { Thread . interrupted ( ) ; throw new DatasetException ( ex ) ; } catch ( Exception ex ) { throw Throwables . propagate ( ex ) ; } } | Allow methods to act with the privileges of a login . |
/**
 * Factory method to create the chain of commands rooted at the given rootConfig.
 * The last command in the chain will feed records into finalChild.
 *
 * @param rootConfig          config containing the command list under configKey
 * @param configKey           key under which the list of command configs is stored
 * @param finalChild          command that receives the output of the last command
 * @param ignoreNotifications applied only to the final connector in the chain
 * @return the commands built, in chain order (empty if configKey is absent)
 */
protected List<Command> buildCommandChain(Config rootConfig, String configKey,
    Command finalChild, boolean ignoreNotifications) {
  Preconditions.checkNotNull(rootConfig);
  Preconditions.checkNotNull(configKey);
  Preconditions.checkNotNull(finalChild);
  // A missing configKey yields an empty list rather than an error.
  List<? extends Config> commandConfigs =
      new Configs().getConfigList(rootConfig, configKey, Collections.<Config>emptyList());
  List<Command> commands = Lists.newArrayList();
  Command currentParent = this; // first command's parent is this object
  Connector lastConnector = null;
  for (int i = 0; i < commandConfigs.size(); i++) {
    boolean isLast = (i == commandConfigs.size() - 1);
    // NOTE(review): only the last connector is built with ignoreNotifications;
    // presumably intermediate connectors must always forward notifications — confirm.
    Connector connector = new Connector(ignoreNotifications && isLast);
    if (isLast) {
      // The tail of the chain feeds its records into finalChild.
      connector.setChild(finalChild);
    }
    Config cmdConfig = commandConfigs.get(i);
    Command cmd = buildCommand(cmdConfig, currentParent, connector);
    commands.add(cmd);
    if (i > 0) {
      // Wire the previous command's connector to this newly built command.
      lastConnector.setChild(cmd);
    }
    connector.setParent(cmd);
    // The next command's parent is this command's downstream connector.
    currentParent = connector;
    lastConnector = connector;
  }
  return commands;
}
34,199 | protected Command buildCommand ( Config cmdConfig , Command currentParent , Command finalChild ) { Preconditions . checkNotNull ( cmdConfig ) ; Preconditions . checkNotNull ( currentParent ) ; Preconditions . checkNotNull ( finalChild ) ; Set < Map . Entry < String , Object > > entries = cmdConfig . root ( ) . unwrapped ( ) . entrySet ( ) ; if ( entries . size ( ) != 1 ) { throw new MorphlineCompilationException ( "Illegal number of entries: " + entries . size ( ) , cmdConfig ) ; } Map . Entry < String , Object > entry = entries . iterator ( ) . next ( ) ; String cmdName = entry . getKey ( ) ; Class cmdClass ; LOG . trace ( "Building command: {}" , cmdName ) ; if ( ! cmdName . contains ( "." ) && ! cmdName . contains ( "/" ) ) { cmdClass = getContext ( ) . getCommandBuilder ( cmdName ) ; if ( cmdClass == null ) { throw new MorphlineCompilationException ( "No command builder registered for name: " + cmdName , cmdConfig ) ; } } else { String className = cmdName . replace ( '/' , '.' ) ; try { cmdClass = Class . forName ( className ) ; } catch ( ClassNotFoundException e ) { throw new MorphlineCompilationException ( "Cannot find command class: " + className , cmdConfig , e ) ; } } Object obj ; try { obj = cmdClass . newInstance ( ) ; } catch ( Exception e ) { throw new MorphlineCompilationException ( "Cannot instantiate command class: " + cmdClass . getName ( ) , cmdConfig , e ) ; } if ( ! ( obj instanceof CommandBuilder ) ) { throw new MorphlineCompilationException ( "Type of command " + cmdName + " must be an instance of " + CommandBuilder . class . getName ( ) + " but is: " + cmdClass . getName ( ) , cmdConfig ) ; } CommandBuilder builder = ( CommandBuilder ) obj ; Command cmd = builder . build ( cmdConfig . getConfig ( cmdName ) , currentParent , finalChild , getContext ( ) ) ; return cmd ; } | Factory method to create a command rooted at the given cmdConfig . The command will feed records into finalChild . 
The command will have currentParent as it s parent . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.