idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
14,000
/**
 * Validates that the given hostname is usable in the TLS SNI extension.
 * SNI requires a fully-qualified domain name: at least one dot (and not a
 * leading one), no trailing dot, and not an IPv4/IPv6 address literal.
 */
static boolean isValidHostNameForSNI(String hostname) {
    if (hostname == null) {
        return false;
    }
    return hostname.indexOf('.') > 0
            && !hostname.endsWith(".")
            && !NetUtil.isValidIpV4Address(hostname)
            && !NetUtil.isValidIpV6Address(hostname);
}
Validate that the given hostname can be used in SNI extension .
14,001
/**
 * Attempts to accept a new SPDY stream into the session.
 * Rejected when a GOAWAY frame has been sent or received, or when the
 * initiating side is already at its concurrent-stream limit.
 * NOTE(review): accesses to sentGoAwayFrame, lastGoodStreamId and the
 * initial window sizes are unsynchronized here — confirm external locking.
 */
private boolean acceptStream(int streamId, byte priority, boolean remoteSideClosed, boolean localSideClosed) {
    if (receivedGoAwayFrame || sentGoAwayFrame) {
        return false;
    }
    boolean remote = isRemoteInitiatedId(streamId);
    // Remote-initiated streams count against our local limit, and vice versa.
    int maxConcurrentStreams = remote ? localConcurrentStreams : remoteConcurrentStreams;
    if (spdySession.numActiveStreams(remote) >= maxConcurrentStreams) {
        return false;
    }
    spdySession.acceptStream(
            streamId, priority, remoteSideClosed, localSideClosed,
            initialSendWindowSize, initialReceiveWindowSize, remote);
    if (remote) {
        lastGoodStreamId = streamId;
    }
    return true;
}
need to synchronize accesses to sentGoAwayFrame lastGoodStreamId and initial window sizes
14,002
/**
 * Returns how many bytes starting at {@code minIndex} match the bytes
 * starting at {@code inIndex}, scanning no further than {@code maxIndex}.
 * Used to measure how far a matched copy overlaps an already-written literal.
 */
private static int findMatchingLength(ByteBuf in, int minIndex, int inIndex, int maxIndex) {
    int matched = 0;
    // Fast path: compare 32 bits at a time while at least 4 bytes remain.
    while (inIndex <= maxIndex - 4 && in.getInt(inIndex) == in.getInt(minIndex + matched)) {
        inIndex += 4;
        matched += 4;
    }
    // Slow path: finish the tail one byte at a time.
    while (inIndex < maxIndex && in.getByte(minIndex + matched) == in.getByte(inIndex)) {
        ++inIndex;
        ++matched;
    }
    return matched;
}
Iterates over the supplied input buffer between the supplied minIndex and maxIndex to find how long our matched copy overlaps with an already - written literal value .
14,003
/**
 * Writes a Snappy literal element to {@code out}: a tag byte (carrying the
 * length inline, or a marker plus 1-4 trailing little-endian length bytes)
 * followed by {@code length} raw bytes copied from {@code in}'s readerIndex.
 */
static void encodeLiteral(ByteBuf in, ByteBuf out, int length) {
    final int lenMinusOne = length - 1;
    if (length < 61) {
        // Lengths up to 60 are stored directly in the upper 6 bits of the tag.
        out.writeByte(lenMinusOne << 2);
    } else {
        // Longer lengths use tag values 60-63 plus that many extra length bytes.
        int bitLength = bitsToEncode(lenMinusOne);
        int bytesToEncode = 1 + bitLength / 8;
        out.writeByte((59 + bytesToEncode) << 2);
        for (int i = 0; i < bytesToEncode; i++) {
            out.writeByte((lenMinusOne >> (i * 8)) & 0xff);
        }
    }
    out.writeBytes(in, length);
}
Writes a literal to the supplied output buffer by directly copying from the input buffer . The literal is taken from the current readerIndex up to the supplied length .
14,004
/**
 * Encodes a back-reference copy of {@code length} bytes at {@code offset}
 * as a series of copy elements, each at most 64 bytes long. Lengths just
 * over 64 are split as 60 + remainder so the final piece stays well-formed.
 */
private static void encodeCopy(ByteBuf out, int offset, int length) {
    for (; length >= 68; length -= 64) {
        encodeCopyWithOffset(out, offset, 64);
    }
    if (length > 64) {
        encodeCopyWithOffset(out, offset, 60);
        length -= 60;
    }
    encodeCopyWithOffset(out, offset, length);
}
Encodes a series of copies each at most 64 bytes in length .
14,005
/**
 * Computes the masked CRC32C checksum of {@code length} bytes of
 * {@code data} starting at {@code offset}, as required by the Snappy
 * framing format.
 */
static int calculateChecksum(ByteBuf data, int offset, int length) {
    Crc32c crc32 = new Crc32c();
    try {
        crc32.update(data, offset, length);
        int checksum = (int) crc32.getValue();
        return maskChecksum(checksum);
    } finally {
        // Leave the CRC instance in a clean state.
        crc32.reset();
    }
}
Computes the CRC32C checksum of the supplied data and performs the mask operation on the computed checksum
14,006
/** Returns true if the given Java-to-OpenSSL cipher mapping is already cached. */
static boolean isJ2OCached(String key, String value) {
    String cached = j2o.get(key);
    return value.equals(cached);
}
Tests if the specified key - value pair has been cached in Java - to - OpenSSL cache .
14,007
/**
 * Returns true if the given OpenSSL-to-Java cipher mapping has been cached
 * for the specified protocol.
 */
static boolean isO2JCached(String key, String protocol, String value) {
    Map<String, String> p2j = o2j.get(key);
    return p2j != null && value.equals(p2j.get(protocol));
}
Tests if the specified key - value pair has been cached in OpenSSL - to - Java cache .
14,008
/**
 * Converts the given Java cipher suite name to its OpenSSL equivalent,
 * consulting the cache first and converting (and caching) on a miss.
 */
static String toOpenSsl(String javaCipherSuite, boolean boringSSL) {
    String cached = j2o.get(javaCipherSuite);
    if (cached == null) {
        cached = cacheFromJava(javaCipherSuite, boringSSL);
    }
    return cached;
}
Converts the specified Java cipher suite to its corresponding OpenSSL cipher suite name .
14,009
/**
 * Converts an OpenSSL cipher suite name to the Java naming convention for
 * the given protocol prefix. Returns {@code null} when the suite is unknown.
 */
static String toJava(String openSslCipherSuite, String protocol) {
    Map<String, String> p2j = o2j.get(openSslCipherSuite);
    if (p2j == null) {
        // Not cached yet; try to derive the mapping from the OpenSSL name.
        p2j = cacheFromOpenSsl(openSslCipherSuite);
        if (p2j == null) {
            return null;
        }
    }
    String javaCipherSuite = p2j.get(protocol);
    if (javaCipherSuite != null) {
        return javaCipherSuite;
    }
    // No protocol-specific entry: fall back to the generic one and prefix it.
    String cipher = p2j.get("");
    return cipher == null ? null : protocol + '_' + cipher;
}
Convert from OpenSSL cipher suite name convention to java cipher suite name convention .
14,010
/**
 * Returns the time in nanoseconds until the closest-deadline scheduled
 * task is due, or {@code SCHEDULE_PURGE_INTERVAL} when nothing is scheduled.
 */
protected long delayNanos(long currentTimeNanos) {
    ScheduledFutureTask<?> next = peekScheduledTask();
    return next == null ? SCHEDULE_PURGE_INTERVAL : next.delayNanos(currentTimeNanos);
}
Returns the amount of time left until the scheduled task with the closest dead line is executed .
14,011
/**
 * Confirms whether shutdown of this executor can complete now.
 * Runs remaining tasks and shutdown hooks, honours the graceful-shutdown
 * quiet period and timeout, and returns {@code true} once it is safe to
 * actually shut down. Must be invoked from within the event loop.
 */
protected boolean confirmShutdown() {
    if (!isShuttingDown()) {
        return false;
    }
    if (!inEventLoop()) {
        throw new IllegalStateException("must be invoked from an event loop");
    }
    cancelScheduledTasks();
    if (gracefulShutdownStartTime == 0) {
        gracefulShutdownStartTime = ScheduledFutureTask.nanoTime();
    }
    if (runAllTasks() || runShutdownHooks()) {
        if (isShutdown()) {
            // Executor already shut down — no new tasks can be added.
            return true;
        }
        if (gracefulShutdownQuietPeriod == 0) {
            return true;
        }
        // Tasks ran recently; wake up again to re-check after the quiet period.
        wakeup(true);
        return false;
    }
    final long nanoTime = ScheduledFutureTask.nanoTime();
    if (isShutdown() || nanoTime - gracefulShutdownStartTime > gracefulShutdownTimeout) {
        return true;
    }
    if (nanoTime - lastExecutionTime <= gracefulShutdownQuietPeriod) {
        // Still inside the quiet period: sleep briefly before checking again.
        wakeup(true);
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // Intentionally ignored: shutdown state is re-checked on the next pass.
        }
        return false;
    }
    // Quiet period elapsed with no task executions — safe to shut down.
    return true;
}
Confirms whether shutdown of this instance should be completed now .
14,012
/**
 * Sets the maximum size of the HPACK header table and, if the value
 * changed, writes a dynamic table size update (pattern 0x20, 5-bit
 * prefix) to {@code out}.
 *
 * @throws Http2Exception if the size is outside the allowed bounds
 */
public void setMaxHeaderTableSize(ByteBuf out, long maxHeaderTableSize) throws Http2Exception {
    if (maxHeaderTableSize < MIN_HEADER_TABLE_SIZE || maxHeaderTableSize > MAX_HEADER_TABLE_SIZE) {
        throw connectionError(PROTOCOL_ERROR, "Header Table Size must be >= %d and <= %d but was %d",
                MIN_HEADER_TABLE_SIZE, MAX_HEADER_TABLE_SIZE, maxHeaderTableSize);
    }
    if (this.maxHeaderTableSize == maxHeaderTableSize) {
        // Unchanged — avoid emitting a redundant table size update.
        return;
    }
    this.maxHeaderTableSize = maxHeaderTableSize;
    // Capacity 0 evicts everything so the table fits the new maximum.
    ensureCapacity(0);
    encodeInteger(out, 0x20, 5, maxHeaderTableSize);
}
Set the maximum table size .
14,013
/**
 * Encodes a string literal (RFC 7541 section 5.2), using Huffman coding
 * only when it is strictly shorter than the raw octet form.
 */
private void encodeStringLiteral(ByteBuf out, CharSequence string) {
    int huffmanLength = hpackHuffmanEncoder.getEncodedLength(string);
    if (huffmanLength < string.length()) {
        // Huffman flag (0x80) plus 7-bit prefixed length, then the coded bytes.
        encodeInteger(out, 0x80, 7, huffmanLength);
        hpackHuffmanEncoder.encode(out, string);
    } else {
        encodeInteger(out, 0x00, 7, string.length());
        if (string instanceof AsciiString) {
            // Fast path: copy the backing array directly, no charset encoding.
            AsciiString asciiString = (AsciiString) string;
            out.writeBytes(asciiString.array(), asciiString.arrayOffset(), asciiString.length());
        } else {
            out.writeCharSequence(string, CharsetUtil.ISO_8859_1);
        }
    }
}
Encode string literal according to Section 5 . 2 .
14,014
/**
 * Encodes a literal header field (RFC 7541 section 6.2). The indexing
 * type selects the bit pattern and prefix size; a valid {@code nameIndex}
 * references the name in the table, otherwise the name is written literally.
 */
private void encodeLiteral(ByteBuf out, CharSequence name, CharSequence value, IndexType indexType, int nameIndex) {
    boolean nameIndexValid = nameIndex != -1;
    switch (indexType) {
    case INCREMENTAL:
        // 01xxxxxx — literal with incremental indexing, 6-bit prefix.
        encodeInteger(out, 0x40, 6, nameIndexValid ? nameIndex : 0);
        break;
    case NONE:
        // 0000xxxx — literal without indexing, 4-bit prefix.
        encodeInteger(out, 0x00, 4, nameIndexValid ? nameIndex : 0);
        break;
    case NEVER:
        // 0001xxxx — literal never indexed, 4-bit prefix.
        encodeInteger(out, 0x10, 4, nameIndexValid ? nameIndex : 0);
        break;
    default:
        throw new Error("should not reach here");
    }
    if (!nameIndexValid) {
        encodeStringLiteral(out, name);
    }
    encodeStringLiteral(out, value);
}
Encode literal header field according to Section 6 . 2 .
14,015
/**
 * Returns the header field at the given index (0 = most recently added).
 * Exposed for testing.
 */
HpackHeaderField getHeaderField(int index) {
    HeaderEntry entry = head;
    // Walk backwards index+1 steps from the sentinel head node.
    while (index-- >= 0) {
        entry = entry.before;
    }
    return entry;
}
Return the header field at the given index . Exposed for testing .
14,016
/**
 * Returns the header entry with the lowest index for the name/value pair,
 * or {@code null} when the pair is not in the dynamic table.
 */
private HeaderEntry getEntry(CharSequence name, CharSequence value) {
    if (length() == 0 || name == null || value == null) {
        return null;
    }
    int h = AsciiString.hashCode(name);
    int i = index(h);
    for (HeaderEntry e = headerFields[i]; e != null; e = e.next) {
        // Non-short-circuiting '&' is deliberate: both comparisons always run
        // so lookup timing does not leak which part mismatched.
        if (e.hash == h && (equalsConstantTime(name, e.name) & equalsConstantTime(value, e.value)) != 0) {
            return e;
        }
    }
    return null;
}
Returns the header entry with the lowest index value for the header field . Returns null if header field is not in the dynamic table .
14,017
/**
 * Returns the lowest index for the header field name in the dynamic
 * table, or -1 when the name is not present.
 */
private int getIndex(CharSequence name) {
    if (length() == 0 || name == null) {
        return -1;
    }
    int h = AsciiString.hashCode(name);
    int i = index(h);
    for (HeaderEntry e = headerFields[i]; e != null; e = e.next) {
        // Constant-time comparison returns non-zero on a match.
        if (e.hash == h && equalsConstantTime(name, e.name) != 0) {
            return getIndex(e.index);
        }
    }
    return -1;
}
Returns the lowest index value for the header field name in the dynamic table . Returns - 1 if the header field name is not in the dynamic table .
14,018
/**
 * Adds the header field to the dynamic table, evicting the oldest entries
 * until the new one fits. If the entry alone exceeds the maximum table
 * size, the table is cleared and the entry is not added.
 */
private void add(CharSequence name, CharSequence value, long headerSize) {
    if (headerSize > maxHeaderTableSize) {
        clear();
        return;
    }
    // Evict until there is room for the new entry.
    while (maxHeaderTableSize - size < headerSize) {
        remove();
    }
    int h = AsciiString.hashCode(name);
    int i = index(h);
    HeaderEntry old = headerFields[i];
    HeaderEntry e = new HeaderEntry(h, name, value, head.before.index - 1, old);
    headerFields[i] = e;
    e.addBefore(head);
    size += headerSize;
}
Add the header field to the dynamic table . Entries are evicted from the dynamic table until the size of the table and the new header field is less than the table s maxHeaderTableSize . If the size of the new entry is larger than the table s maxHeaderTableSize the dynamic table will be cleared .
14,019
/**
 * Removes duplicate cookies from {@code encoded}, keeping (in original
 * order) only the last occurrence of each name as recorded in
 * {@code nameToLastIndex}.
 */
private static List<String> dedup(List<String> encoded, Map<String, Integer> nameToLastIndex) {
    boolean[] keep = new boolean[encoded.size()];
    for (int idx : nameToLastIndex.values()) {
        keep[idx] = true;
    }
    List<String> result = new ArrayList<String>(nameToLastIndex.size());
    int n = encoded.size();
    for (int i = 0; i < n; i++) {
        if (keep[i]) {
            result.add(encoded.get(i));
        }
    }
    return result;
}
Deduplicate a list of encoded cookies by keeping only the last instance with a given name .
14,020
/**
 * Logs a formatted message at TRACE level (or DEBUG when the backing
 * logger does not support TRACE).
 */
public void trace(String format, Object arg) {
    if (!isTraceEnabled()) {
        return;
    }
    FormattingTuple ft = MessageFormatter.format(format, arg);
    Level level = traceCapable ? Level.TRACE : Level.DEBUG;
    logger.log(FQCN, level, ft.getMessage(), ft.getThrowable());
}
Log a message at level TRACE according to the specified format and argument .
14,021
/** Logs the given message at WARN level. */
public void warn(String msg) {
    logger.log(FQCN, Level.WARN, msg, null);
}
Log a message object at the WARN level .
14,022
/**
 * Sets the TLS protocol versions to enable, or {@code null} to use the
 * defaults. The supplied array is defensively copied.
 */
public SslContextBuilder protocols(String... protocols) {
    if (protocols == null) {
        this.protocols = null;
    } else {
        this.protocols = protocols.clone();
    }
    return this;
}
The TLS protocol versions to enable .
14,023
/** Returns the number of header fields currently held in the ring buffer. */
public int length() {
    // The head index may have wrapped around past the tail.
    if (head < tail) {
        return hpackHeaderFields.length - tail + head;
    }
    return head - tail;
}
Return the number of header fields in the dynamic table .
14,024
/**
 * Adds the header field, evicting the oldest entries until it fits. If
 * the entry alone is larger than the capacity, the table is cleared and
 * the entry is dropped.
 */
public void add(HpackHeaderField header) {
    int headerSize = header.size();
    if (headerSize > capacity) {
        clear();
        return;
    }
    while (capacity - size < headerSize) {
        remove();
    }
    hpackHeaderFields[head++] = header;
    size += header.size();
    // Wrap the ring-buffer head index.
    if (head == hpackHeaderFields.length) {
        head = 0;
    }
}
Add the header field to the dynamic table . Entries are evicted from the dynamic table until the size of the table and the new header field is less than or equal to the table s capacity . If the size of the new entry is larger than the table s capacity the dynamic table will be cleared .
14,025
/**
 * Removes and returns the oldest header field from the table, or
 * {@code null} when the table is empty.
 */
public HpackHeaderField remove() {
    HpackHeaderField removed = hpackHeaderFields[tail];
    if (removed == null) {
        return null;
    }
    size -= removed.size();
    hpackHeaderFields[tail] = null;
    // Advance and wrap the ring-buffer tail index.
    tail++;
    if (tail == hpackHeaderFields.length) {
        tail = 0;
    }
    return removed;
}
Remove and return the oldest header field from the dynamic table .
14,026
/** Removes all entries from the dynamic table and resets its indices. */
public void clear() {
    // Null out every occupied slot so entries can be garbage collected.
    while (tail != head) {
        hpackHeaderFields[tail] = null;
        if (++tail == hpackHeaderFields.length) {
            tail = 0;
        }
    }
    head = 0;
    tail = 0;
    size = 0;
}
Remove all entries from the dynamic table .
14,027
/**
 * Sets the maximum size (in bytes) of the dynamic table, evicting the
 * oldest entries until the current contents fit, and resizes the backing
 * ring buffer when the maximum entry count changed.
 *
 * @throws IllegalArgumentException if the capacity is out of bounds
 */
public void setCapacity(long capacity) {
    if (capacity < MIN_HEADER_TABLE_SIZE || capacity > MAX_HEADER_TABLE_SIZE) {
        throw new IllegalArgumentException("capacity is invalid: " + capacity);
    }
    if (this.capacity == capacity) {
        // No change — nothing to do.
        return;
    }
    this.capacity = capacity;
    if (capacity == 0) {
        clear();
    } else {
        // Evict the oldest entries until we are within the new capacity.
        while (size > capacity) {
            remove();
        }
    }
    // Maximum number of entries the capacity could ever hold (each entry
    // carries HEADER_ENTRY_OVERHEAD bytes of bookkeeping), rounded up.
    int maxEntries = (int) (capacity / HpackHeaderField.HEADER_ENTRY_OVERHEAD);
    if (capacity % HpackHeaderField.HEADER_ENTRY_OVERHEAD != 0) {
        maxEntries++;
    }
    if (hpackHeaderFields != null && hpackHeaderFields.length == maxEntries) {
        // Backing array is already the right size.
        return;
    }
    // Copy the surviving entries into a fresh ring buffer starting at 0.
    HpackHeaderField[] tmp = new HpackHeaderField[maxEntries];
    int len = length();
    int cursor = tail;
    for (int i = 0; i < len; i++) {
        HpackHeaderField entry = hpackHeaderFields[cursor++];
        tmp[i] = entry;
        if (cursor == hpackHeaderFields.length) {
            cursor = 0;
        }
    }
    tail = 0;
    head = tail + len;
    hpackHeaderFields = tmp;
}
Set the maximum size of the dynamic table . Entries are evicted from the dynamic table until the size of the table is less than or equal to the maximum size .
14,028
/**
 * Adds the Upgrade and Connection headers required to request an upgrade
 * to the supported protocol, merging in any extra header names the
 * upgrade codec contributes.
 */
private void setUpgradeRequestHeaders(ChannelHandlerContext ctx, HttpRequest request) {
    request.headers().set(HttpHeaderNames.UPGRADE, upgradeCodec.protocol());
    // LinkedHashSet keeps insertion order while de-duplicating names.
    Set<CharSequence> connectionParts = new LinkedHashSet<CharSequence>(2);
    connectionParts.addAll(upgradeCodec.setUpgradeHeaders(ctx, request));
    StringBuilder builder = new StringBuilder();
    for (CharSequence part : connectionParts) {
        builder.append(part);
        builder.append(',');
    }
    builder.append(HttpHeaderValues.UPGRADE);
    request.headers().add(HttpHeaderNames.CONNECTION, builder.toString());
}
Adds all upgrade request headers necessary for an upgrade to the supported protocols .
14,029
/**
 * Sends a 304 Not Modified response (with a Date header) when the file
 * timestamp matches the one supplied by the client.
 */
private void sendNotModified(ChannelHandlerContext ctx) {
    FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, NOT_MODIFIED);
    setDateHeader(response);
    sendAndCleanupConnection(ctx, response);
}
When file timestamp is the same as what the browser is sending up send a 304 Not Modified
14,030
/**
 * Associates the compressor with the given stream (when both exist) so it
 * can be looked up while encoding that stream's frames. Called after the
 * superclass has written the headers and created the stream object.
 */
private void bindCompressorToStream(EmbeddedChannel compressor, int streamId) {
    if (compressor == null) {
        return;
    }
    Http2Stream stream = connection().stream(streamId);
    if (stream != null) {
        stream.setProperty(propertyKey, compressor);
    }
}
Called after the super class has written the headers and created any associated stream objects .
14,031
/**
 * Writes the given value to {@code out} as a protobuf base-128 varint:
 * 7 data bits per byte, continuation bit set on all but the last byte.
 */
static void writeRawVarint32(ByteBuf out, int value) {
    while ((value & ~0x7F) != 0) {
        out.writeByte((value & 0x7F) | 0x80);
        value >>>= 7;
    }
    out.writeByte(value);
}
Writes the given value to the output buffer as a protobuf varint32 ( base - 128 variable - length encoding ) .
14,032
/**
 * Decodes the MQTT fixed header: one byte of flags (message type, DUP,
 * QoS, RETAIN) followed by 1-4 "remaining length" bytes in base-128
 * variable-length encoding.
 *
 * @throws DecoderException if the remaining length needs more than 4 bytes
 */
private static MqttFixedHeader decodeFixedHeader(ByteBuf buffer) {
    short b1 = buffer.readUnsignedByte();
    MqttMessageType messageType = MqttMessageType.valueOf(b1 >> 4);
    boolean dupFlag = (b1 & 0x08) == 0x08;
    int qosLevel = (b1 & 0x06) >> 1;
    boolean retain = (b1 & 0x01) != 0;
    // Remaining length: 7 bits per byte, continuation flag in the high bit.
    int remainingLength = 0;
    int multiplier = 1;
    short digit;
    int loops = 0;
    do {
        digit = buffer.readUnsignedByte();
        remainingLength += (digit & 127) * multiplier;
        multiplier *= 128;
        loops++;
    } while ((digit & 128) != 0 && loops < 4);
    // MQTT allows at most 4 length bytes; a set continuation bit on the
    // fourth byte is a protocol violation.
    if (loops == 4 && (digit & 128) != 0) {
        throw new DecoderException("remaining length exceeds 4 digits (" + messageType + ')');
    }
    MqttFixedHeader decodedFixedHeader =
            new MqttFixedHeader(messageType, dupFlag, MqttQoS.valueOf(qosLevel), retain, remainingLength);
    return validateFixedHeader(resetUnusedFields(decodedFixedHeader));
}
Decodes the fixed header . It s one byte for the flags and then variable bytes for the remaining length .
14,033
/**
 * Decodes the payload portion of an MQTT message according to its type;
 * message types without a payload yield an empty result.
 */
private static Result<?> decodePayload(ByteBuf buffer, MqttMessageType messageType, int bytesRemainingInVariablePart, Object variableHeader) {
    switch (messageType) {
    case CONNECT:
        return decodeConnectionPayload(buffer, (MqttConnectVariableHeader) variableHeader);
    case SUBSCRIBE:
        return decodeSubscribePayload(buffer, bytesRemainingInVariablePart);
    case SUBACK:
        return decodeSubackPayload(buffer, bytesRemainingInVariablePart);
    case UNSUBSCRIBE:
        return decodeUnsubscribePayload(buffer, bytesRemainingInVariablePart);
    case PUBLISH:
        return decodePublishPayload(buffer, bytesRemainingInVariablePart);
    default:
        // Remaining message types carry no payload.
        return new Result<Object>(null, 0);
    }
}
Decodes the payload .
14,034
/**
 * Appends a buffer to the end of the queue, optionally associating a
 * listener to be completed once all of the buffer's bytes have been
 * consumed and written.
 */
public final void add(ByteBuf buf, ChannelFutureListener listener) {
    bufAndListenerPairs.add(buf);
    if (listener != null) {
        // Listeners are interleaved with their buffers in the same queue.
        bufAndListenerPairs.add(listener);
    }
    incrementReadableBytes(buf.readableBytes());
}
Add a buffer to the end of the queue and associate a listener with it that should be completed when all the buffers bytes have been consumed from the queue and written .
14,035
/** Copies all pending buffer/listener entries of this queue into {@code dest}. */
public final void copyTo(AbstractCoalescingBufferQueue dest) {
    dest.bufAndListenerPairs.addAll(bufAndListenerPairs);
    dest.incrementReadableBytes(readableBytes);
}
Copy all pending entries in this queue into the destination queue .
14,036
/**
 * Drains the queue, writing every pending buffer to {@code ctx} and
 * attaching each promise/listener to the write of the buffer that
 * precedes it. The first exception encountered is rethrown (wrapped)
 * after the drain completes; later ones are only logged.
 */
public final void writeAndRemoveAll(ChannelHandlerContext ctx) {
    decrementReadableBytes(readableBytes);
    Throwable pending = null;
    ByteBuf previousBuf = null;
    for (;;) {
        Object entry = bufAndListenerPairs.poll();
        try {
            if (entry == null) {
                // Queue exhausted; flush the trailing buffer without a listener.
                if (previousBuf != null) {
                    ctx.write(previousBuf, ctx.voidPromise());
                }
                break;
            }
            if (entry instanceof ByteBuf) {
                // Two buffers in a row: the previous one carried no listener.
                if (previousBuf != null) {
                    ctx.write(previousBuf, ctx.voidPromise());
                }
                previousBuf = (ByteBuf) entry;
            } else if (entry instanceof ChannelPromise) {
                ctx.write(previousBuf, (ChannelPromise) entry);
                previousBuf = null;
            } else {
                ctx.write(previousBuf).addListener((ChannelFutureListener) entry);
                previousBuf = null;
            }
        } catch (Throwable t) {
            if (pending == null) {
                pending = t;
            } else {
                logger.info("Throwable being suppressed because Throwable {} is already pending", pending, t);
            }
        }
    }
    if (pending != null) {
        throw new IllegalStateException(pending);
    }
}
Writes all remaining elements in this queue .
14,037
/**
 * Parses a hosts file, trying each charset in order until one yields
 * entries. Returns {@code HostsFileEntries.EMPTY} when the file is
 * missing, not a regular file, or produced no entries with any charset.
 *
 * @throws IOException if reading the file fails
 */
public static HostsFileEntries parse(File file, Charset... charsets) throws IOException {
    checkNotNull(file, "file");
    checkNotNull(charsets, "charsets");
    if (!file.exists() || !file.isFile()) {
        return HostsFileEntries.EMPTY;
    }
    for (Charset charset : charsets) {
        // parse(Reader) closes the reader chain (and thus the stream) itself.
        HostsFileEntries entries =
                parse(new BufferedReader(new InputStreamReader(new FileInputStream(file), charset)));
        if (entries != HostsFileEntries.EMPTY) {
            return entries;
        }
    }
    return HostsFileEntries.EMPTY;
}
Parse a hosts file .
14,038
/**
 * Parses hosts-file-format text from the reader, returning the IPv4 and
 * IPv6 host mappings. For duplicate hostnames the first entry wins.
 * The reader is always closed before returning.
 *
 * @throws IOException if reading fails
 */
public static HostsFileEntries parse(Reader reader) throws IOException {
    checkNotNull(reader, "reader");
    BufferedReader buff = new BufferedReader(reader);
    try {
        Map<String, Inet4Address> ipv4Entries = new HashMap<String, Inet4Address>();
        Map<String, Inet6Address> ipv6Entries = new HashMap<String, Inet6Address>();
        String line;
        while ((line = buff.readLine()) != null) {
            // Strip trailing comments.
            int commentPosition = line.indexOf('#');
            if (commentPosition != -1) {
                line = line.substring(0, commentPosition);
            }
            line = line.trim();
            if (line.isEmpty()) {
                continue;
            }
            // Split on whitespace, dropping empty tokens.
            List<String> lineParts = new ArrayList<String>();
            for (String s : WHITESPACES.split(line)) {
                if (!s.isEmpty()) {
                    lineParts.add(s);
                }
            }
            // A valid line needs an address plus at least one hostname.
            if (lineParts.size() < 2) {
                continue;
            }
            byte[] ipBytes = NetUtil.createByteArrayFromIpAddressString(lineParts.get(0));
            if (ipBytes == null) {
                // Skip lines whose first token is not a valid IP address.
                continue;
            }
            for (int i = 1; i < lineParts.size(); i++) {
                String hostname = lineParts.get(i);
                String hostnameLower = hostname.toLowerCase(Locale.ENGLISH);
                InetAddress address = InetAddress.getByAddress(hostname, ipBytes);
                if (address instanceof Inet4Address) {
                    Inet4Address previous = ipv4Entries.put(hostnameLower, (Inet4Address) address);
                    if (previous != null) {
                        // First declaration wins: restore the earlier mapping.
                        ipv4Entries.put(hostnameLower, previous);
                    }
                } else {
                    Inet6Address previous = ipv6Entries.put(hostnameLower, (Inet6Address) address);
                    if (previous != null) {
                        // First declaration wins: restore the earlier mapping.
                        ipv6Entries.put(hostnameLower, previous);
                    }
                }
            }
        }
        return ipv4Entries.isEmpty() && ipv6Entries.isEmpty() ?
                HostsFileEntries.EMPTY :
                new HostsFileEntries(ipv4Entries, ipv6Entries);
    } finally {
        try {
            buff.close();
        } catch (IOException e) {
            logger.warn("Failed to close a reader", e);
        }
    }
}
Parse a reader of hosts file format .
14,039
/**
 * Calculates the maximum encrypted output size needed to wrap
 * {@code plaintextBytes}, assuming (worst case) one TLS record per
 * buffer, clamped to {@code Integer.MAX_VALUE}.
 */
final int calculateOutNetBufSize(int plaintextBytes, int numBuffers) {
    // long arithmetic avoids int overflow before the clamp.
    long maxOverhead = (long) Conscrypt.maxSealOverhead(getWrappedEngine()) * numBuffers;
    return (int) min(Integer.MAX_VALUE, plaintextBytes + maxOverhead);
}
Calculates the maximum size of the encrypted output buffer required to wrap the given plaintext bytes . Assumes as a worst case that there is one TLS record per buffer .
14,040
/**
 * Detects the proxy protocol version without consuming the buffer:
 * returns the byte following the binary signature when the v2 prefix
 * matches, 1 (text protocol) otherwise, or -1 when fewer than 13 bytes
 * are readable.
 */
private static int findVersion(final ByteBuf buffer) {
    final int n = buffer.readableBytes();
    // The binary signature plus version byte requires at least 13 bytes.
    if (n < 13) {
        return -1;
    }
    int idx = buffer.readerIndex();
    return match(BINARY_PREFIX, buffer, idx) ? buffer.getByte(idx + BINARY_PREFIX_LENGTH) : 1;
}
Returns the proxy protocol specification version in the buffer if the version is found . Returns - 1 if no version was found in the buffer .
14,041
/**
 * Returns the total length of the proxy protocol v2 header (16 fixed
 * bytes plus the address-length field read at offset 14), or -1 when the
 * buffer does not yet contain the complete header.
 */
private static int findEndOfHeader(final ByteBuf buffer) {
    final int n = buffer.readableBytes();
    if (n < 16) {
        // Not enough data for the fixed-size part of the header.
        return -1;
    }
    int offset = buffer.readerIndex() + 14;
    int totalHeaderBytes = 16 + buffer.getUnsignedShort(offset);
    return n >= totalHeaderBytes ? totalHeaderBytes : -1;
}
Returns the index in the buffer of the end of header if found . Returns - 1 if no end of header was found in the buffer .
14,042
/**
 * Creates and starts the global {@link TrafficCounter} backed by the
 * given executor, after applying the default deviation parameters.
 *
 * @throws IllegalArgumentException if {@code executor} is null
 */
void createGlobalTrafficCounter(ScheduledExecutorService executor) {
    // Fail fast BEFORE mutating any state: the original called
    // setMaxDeviation(...) first, leaving the object partially configured
    // when a null executor was passed.
    if (executor == null) {
        throw new IllegalArgumentException("Executor must not be null");
    }
    setMaxDeviation(DEFAULT_DEVIATION, DEFAULT_SLOWDOWN, DEFAULT_ACCELERATION);
    TrafficCounter tc = new GlobalChannelTrafficCounter(this, executor, "GlobalChannelTC", checkInterval);
    setTrafficCounter(tc);
    tc.start();
}
Create the global TrafficCounter
14,043
/**
 * Returns a live, read-only view of the per-channel traffic counters
 * (e.g. for use by doAccounting). The iterator does not support removal.
 */
public Collection<TrafficCounter> channelTrafficCounters() {
    return new AbstractCollection<TrafficCounter>() {
        @Override
        public Iterator<TrafficCounter> iterator() {
            return new Iterator<TrafficCounter>() {
                final Iterator<PerChannel> iter = channelQueues.values().iterator();

                @Override
                public boolean hasNext() {
                    return iter.hasNext();
                }

                @Override
                public TrafficCounter next() {
                    return iter.next().channelTrafficCounter;
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }

        @Override
        public int size() {
            return channelQueues.size();
        }
    };
}
To allow for instance doAccounting to use the TrafficCounter per channel .
14,044
/**
 * Returns whether at least {@code count} bits are available for reading,
 * counting both the buffered bits and the readable bytes of {@code in}.
 *
 * @throws IllegalArgumentException if {@code count} is negative
 */
boolean hasReadableBits(int count) {
    if (count < 0) {
        // Message fixed: 0 is a valid count, so the expectation is >= 0,
        // not "greater than 0" as the original message claimed.
        throw new IllegalArgumentException("count: " + count + " (expected: count >= 0)");
    }
    // '& Integer.MAX_VALUE' guards against overflow of readableBytes() << 3.
    return bitCount >= count || (in.readableBytes() << 3 & Integer.MAX_VALUE) >= count - bitCount;
}
Checks that the specified number of bits available for reading .
14,045
/**
 * Returns whether at least {@code count} bytes are available for reading.
 *
 * @throws IllegalArgumentException if {@code count} is negative or
 *         exceeds {@code MAX_COUNT_OF_READABLE_BYTES}
 */
boolean hasReadableBytes(int count) {
    if (count < 0 || count > MAX_COUNT_OF_READABLE_BYTES) {
        throw new IllegalArgumentException("count: " + count + " (expected: 0-" + MAX_COUNT_OF_READABLE_BYTES + ')');
    }
    // Delegate in bit units (1 byte == 8 bits).
    return hasReadableBits(count << 3);
}
Checks that the specified number of bytes available for reading .
14,046
/**
 * Parses the three-digit SMTP reply code from the buffer without any
 * intermediate allocation. Reads exactly three bytes.
 */
private static int parseCode(ByteBuf buffer) {
    int code = parseNumber(buffer.readByte()) * 100;
    code += parseNumber(buffer.readByte()) * 10;
    code += parseNumber(buffer.readByte());
    return code;
}
Parses the three - digit SMTP response code from the buffer without any allocation .
14,047
/**
 * Returns true when the two delimiters are exactly "\r\n" and "\n"
 * (in either order), i.e. line-based framing.
 */
private static boolean isLineBased(final ByteBuf[] delimiters) {
    if (delimiters.length != 2) {
        return false;
    }
    ByteBuf a = delimiters[0];
    ByteBuf b = delimiters[1];
    // Normalize so that 'a' is the longer delimiter.
    if (a.capacity() < b.capacity()) {
        a = delimiters[1];
        b = delimiters[0];
    }
    return a.capacity() == 2 && b.capacity() == 1
            && a.getByte(0) == '\r' && a.getByte(1) == '\n'
            && b.getByte(0) == '\n';
}
Returns true if the delimiters are \ n and \ r \ n .
14,048
/** Sets the current state of this decoder and returns the previous state. */
protected S state(S newState) {
    S oldState = state;
    state = newState;
    return oldState;
}
Sets the current state of this decoder .
14,049
/**
 * Returns true when the HTTP message should be treated as the last SPDY
 * frame: a full message with no trailing headers and no readable content.
 */
private static boolean isLast(HttpMessage httpMessage) {
    if (!(httpMessage instanceof FullHttpMessage)) {
        return false;
    }
    FullHttpMessage fullMessage = (FullHttpMessage) httpMessage;
    return fullMessage.trailingHeaders().isEmpty() && !fullMessage.content().isReadable();
}
Checks if the given HTTP message should be considered as a last SPDY frame .
14,050
/**
 * Returns a snapshot of the active streams sorted by the stream
 * comparator, so stream IDs can be iterated in priority order.
 */
Map<Integer, StreamState> activeStreams() {
    Map<Integer, StreamState> streams = new TreeMap<Integer, StreamState>(streamComparator);
    streams.putAll(activeStreams);
    return streams;
}
Stream - IDs should be iterated in priority order
14,051
/** Returns the supported sub-protocols as an insertion-ordered set. */
public Set<String> subprotocols() {
    Set<String> ret = new LinkedHashSet<String>();
    Collections.addAll(ret, subprotocols);
    return ret;
}
Returns the CSV of supported sub protocols
14,052
/**
 * Selects the first requested sub-protocol that this handshaker supports
 * (a configured wildcard matches anything), records it as the selected
 * sub-protocol, and returns it; returns {@code null} when nothing matches.
 */
protected String selectSubprotocol(String requestedSubprotocols) {
    if (requestedSubprotocols == null || subprotocols.length == 0) {
        return null;
    }
    String[] requestedSubprotocolArray = requestedSubprotocols.split(",");
    for (String p : requestedSubprotocolArray) {
        String requestedSubprotocol = p.trim();
        for (String supportedSubprotocol : subprotocols) {
            if (SUB_PROTOCOL_WILDCARD.equals(supportedSubprotocol)
                    || requestedSubprotocol.equals(supportedSubprotocol)) {
                selectedSubprotocol = requestedSubprotocol;
                return requestedSubprotocol;
            }
        }
    }
    return null;
}
Selects the first matching supported sub protocol
14,053
/**
 * Returns the index (relative to {@code haystack}'s readerIndex base) of
 * the first occurrence of {@code needle} in {@code haystack}, or -1 if
 * {@code needle} is not found.
 */
public static int indexOf(ByteBuf needle, ByteBuf haystack) {
    // Naive search: try every alignment where the needle could still fit.
    int attempts = haystack.readableBytes() - needle.readableBytes() + 1;
    for (int i = 0; i < attempts; i++) {
        if (equals(needle, needle.readerIndex(), haystack, haystack.readerIndex() + i, needle.readableBytes())) {
            return haystack.readerIndex() + i;
        }
    }
    return -1;
}
Returns the reader index of needle in haystack or - 1 if needle is not in haystack .
14,054
/**
 * Writes a 16-bit short in big-endian byte order regardless of the
 * buffer's configured endianness.
 */
@SuppressWarnings("deprecation")
public static ByteBuf writeShortBE(ByteBuf buf, int shortValue) {
    if (buf.order() == ByteOrder.BIG_ENDIAN) {
        return buf.writeShort(shortValue);
    }
    return buf.writeShortLE(shortValue);
}
Writes a big - endian 16 - bit short integer to the buffer .
14,055
/**
 * Sets a 16-bit short at the given index in big-endian byte order
 * regardless of the buffer's configured endianness.
 */
@SuppressWarnings("deprecation")
public static ByteBuf setShortBE(ByteBuf buf, int index, int shortValue) {
    if (buf.order() == ByteOrder.BIG_ENDIAN) {
        return buf.setShort(index, shortValue);
    }
    return buf.setShortLE(index, shortValue);
}
Sets a big - endian 16 - bit short integer to the buffer .
14,056
/**
 * Writes a 24-bit medium integer in big-endian byte order regardless of
 * the buffer's configured endianness.
 */
@SuppressWarnings("deprecation")
public static ByteBuf writeMediumBE(ByteBuf buf, int mediumValue) {
    if (buf.order() == ByteOrder.BIG_ENDIAN) {
        return buf.writeMedium(mediumValue);
    }
    return buf.writeMediumLE(mediumValue);
}
Writes a big - endian 24 - bit medium integer to the buffer .
14,057
/**
 * Returns a cached thread-local direct buffer, or {@code null} when the
 * thread-local buffer size is disabled (configured <= 0).
 */
public static ByteBuf threadLocalDirectBuffer() {
    if (THREAD_LOCAL_BUFFER_SIZE <= 0) {
        return null;
    }
    return PlatformDependent.hasUnsafe()
            ? ThreadLocalUnsafeDirectByteBuf.newInstance()
            : ThreadLocalDirectByteBuf.newInstance();
}
Returns a cached thread - local direct buffer if available .
14,058
/**
 * Throws the given (possibly checked) Throwable without declaring it,
 * bypassing the compiler's checked-exception analysis.
 */
public static void throwException(Throwable t) {
    if (hasUnsafe()) {
        PlatformDependent0.throwException(t);
    } else {
        // Generic-erasure trick: rethrow the Throwable as unchecked.
        PlatformDependent.<RuntimeException>throwException0(t);
    }
}
Raises an exception bypassing compiler checks for checked exceptions .
14,059
/** Returns true if every byte in the given subsection of the array is zero. */
public static boolean isZero(byte[] bytes, int startPos, int length) {
    if (hasUnsafe() && unalignedAccess()) {
        // Fast path using unaligned wide reads.
        return PlatformDependent0.isZero(bytes, startPos, length);
    }
    return isZeroSafe(bytes, startPos, length);
}
Determine if a subsection of an array is zero .
14,060
/**
 * Computes a case-insensitive hash code of a byte array assumed to hold
 * ASCII characters.
 */
public static int hashCodeAscii(byte[] bytes, int startPos, int length) {
    if (hasUnsafe() && unalignedAccess()) {
        return PlatformDependent0.hashCodeAscii(bytes, startPos, length);
    }
    return hashCodeAsciiSafe(bytes, startPos, length);
}
Calculate a hash code of a byte array assuming ASCII character encoding . The resulting hash code will be case insensitive .
14,061
/**
 * Fallback (no-Unsafe) implementation of the case-insensitive ASCII hash.
 * Processes 8 bytes per iteration from the end of the range, then folds
 * in the 0-7 leftover leading bytes. Package-private for testing only.
 */
static int hashCodeAsciiSafe(byte[] bytes, int startPos, int length) {
    int hash = HASH_CODE_ASCII_SEED;
    // Bytes that do not fit into whole 8-byte words.
    final int remainingBytes = length & 7;
    final int end = startPos + remainingBytes;
    for (int i = startPos - 8 + length; i >= end; i -= 8) {
        hash = PlatformDependent0.hashCodeAsciiCompute(getLongSafe(bytes, i), hash);
    }
    // Mix in the leftover prefix, split into 4/2/1-byte reads per case.
    switch (remainingBytes) {
    case 7:
        return ((hash * HASH_CODE_C1 + hashCodeAsciiSanitize(bytes[startPos]))
                * HASH_CODE_C2 + hashCodeAsciiSanitize(getShortSafe(bytes, startPos + 1)))
                * HASH_CODE_C1 + hashCodeAsciiSanitize(getIntSafe(bytes, startPos + 3));
    case 6:
        return (hash * HASH_CODE_C1 + hashCodeAsciiSanitize(getShortSafe(bytes, startPos)))
                * HASH_CODE_C2 + hashCodeAsciiSanitize(getIntSafe(bytes, startPos + 2));
    case 5:
        return (hash * HASH_CODE_C1 + hashCodeAsciiSanitize(bytes[startPos]))
                * HASH_CODE_C2 + hashCodeAsciiSanitize(getIntSafe(bytes, startPos + 1));
    case 4:
        return hash * HASH_CODE_C1 + hashCodeAsciiSanitize(getIntSafe(bytes, startPos));
    case 3:
        return (hash * HASH_CODE_C1 + hashCodeAsciiSanitize(bytes[startPos]))
                * HASH_CODE_C2 + hashCodeAsciiSanitize(getShortSafe(bytes, startPos + 1));
    case 2:
        return hash * HASH_CODE_C1 + hashCodeAsciiSanitize(getShortSafe(bytes, startPos));
    case 1:
        return hash * HASH_CODE_C1 + hashCodeAsciiSanitize(bytes[startPos]);
    default:
        return hash;
    }
}
Package private for testing purposes only!
14,062
/**
 * Returns the existing constant with the given name, or atomically
 * creates a new one. Thread-safe via putIfAbsent; a losing racer's
 * freshly-created constant is discarded in favour of the winner's.
 */
private T getOrCreate(String name) {
    T constant = constants.get(name);
    if (constant == null) {
        final T tempConstant = newConstant(nextId(), name);
        constant = constants.putIfAbsent(name, tempConstant);
        if (constant == null) {
            // We won the race; our instance is the canonical constant.
            return tempConstant;
        }
    }
    return constant;
}
Get existing constant by name or creates new one if not exists . Threadsafe
14,063
/**
 * Creates a new constant with the given name, throwing if a constant
 * with that name already exists (including when another thread wins the
 * creation race). Thread-safe.
 *
 * @throws IllegalArgumentException if the name is already in use
 */
private T createOrThrow(String name) {
    T constant = constants.get(name);
    if (constant == null) {
        final T tempConstant = newConstant(nextId(), name);
        constant = constants.putIfAbsent(name, tempConstant);
        if (constant == null) {
            return tempConstant;
        }
    }
    // Either the initial lookup or putIfAbsent found an existing constant.
    throw new IllegalArgumentException(String.format("'%s' is already in use", name));
}
Creates constant by name or throws exception . Threadsafe
14,064
private static ChunkType mapChunkType ( byte type ) { if ( type == 0 ) { return ChunkType . COMPRESSED_DATA ; } else if ( type == 1 ) { return ChunkType . UNCOMPRESSED_DATA ; } else if ( type == ( byte ) 0xff ) { return ChunkType . STREAM_IDENTIFIER ; } else if ( ( type & 0x80 ) == 0x80 ) { return ChunkType . RESERVED_SKIPPABLE ; } else { return ChunkType . RESERVED_UNSKIPPABLE ; } }
Decodes the chunk type from the type tag byte .
14,065
private void setMultipart ( String contentType ) { String [ ] dataBoundary = HttpPostRequestDecoder . getMultipartDataBoundary ( contentType ) ; if ( dataBoundary != null ) { multipartDataBoundary = dataBoundary [ 0 ] ; if ( dataBoundary . length > 1 && dataBoundary [ 1 ] != null ) { charset = Charset . forName ( dataBoundary [ 1 ] ) ; } } else { multipartDataBoundary = null ; } currentStatus = MultiPartStatus . HEADERDELIMITER ; }
Set from the request ContentType the multipartDataBoundary and the possible charset .
14,066
private void parseBodyMultipart ( ) { if ( undecodedChunk == null || undecodedChunk . readableBytes ( ) == 0 ) { return ; } InterfaceHttpData data = decodeMultipart ( currentStatus ) ; while ( data != null ) { addHttpData ( data ) ; if ( currentStatus == MultiPartStatus . PREEPILOGUE || currentStatus == MultiPartStatus . EPILOGUE ) { break ; } data = decodeMultipart ( currentStatus ) ; } }
Parse the Body for multipart
14,067
private static void skipControlCharacters ( ByteBuf undecodedChunk ) { if ( ! undecodedChunk . hasArray ( ) ) { try { skipControlCharactersStandard ( undecodedChunk ) ; } catch ( IndexOutOfBoundsException e1 ) { throw new NotEnoughDataDecoderException ( e1 ) ; } return ; } SeekAheadOptimize sao = new SeekAheadOptimize ( undecodedChunk ) ; while ( sao . pos < sao . limit ) { char c = ( char ) ( sao . bytes [ sao . pos ++ ] & 0xFF ) ; if ( ! Character . isISOControl ( c ) && ! Character . isWhitespace ( c ) ) { sao . setReadPosition ( 1 ) ; return ; } } throw new NotEnoughDataDecoderException ( "Access out of bounds" ) ; }
Skip control Characters
14,068
private InterfaceHttpData findMultipartDelimiter ( String delimiter , MultiPartStatus dispositionStatus , MultiPartStatus closeDelimiterStatus ) { int readerIndex = undecodedChunk . readerIndex ( ) ; try { skipControlCharacters ( undecodedChunk ) ; } catch ( NotEnoughDataDecoderException ignored ) { undecodedChunk . readerIndex ( readerIndex ) ; return null ; } skipOneLine ( ) ; String newline ; try { newline = readDelimiter ( undecodedChunk , delimiter ) ; } catch ( NotEnoughDataDecoderException ignored ) { undecodedChunk . readerIndex ( readerIndex ) ; return null ; } if ( newline . equals ( delimiter ) ) { currentStatus = dispositionStatus ; return decodeMultipart ( dispositionStatus ) ; } if ( newline . equals ( delimiter + "--" ) ) { currentStatus = closeDelimiterStatus ; if ( currentStatus == MultiPartStatus . HEADERDELIMITER ) { currentFieldAttributes = null ; return decodeMultipart ( MultiPartStatus . HEADERDELIMITER ) ; } return null ; } undecodedChunk . readerIndex ( readerIndex ) ; throw new ErrorDataDecoderException ( "No Multipart delimiter found" ) ; }
Find the next Multipart Delimiter
14,069
private void cleanMixedAttributes ( ) { currentFieldAttributes . remove ( HttpHeaderValues . CHARSET ) ; currentFieldAttributes . remove ( HttpHeaderNames . CONTENT_LENGTH ) ; currentFieldAttributes . remove ( HttpHeaderNames . CONTENT_TRANSFER_ENCODING ) ; currentFieldAttributes . remove ( HttpHeaderNames . CONTENT_TYPE ) ; currentFieldAttributes . remove ( HttpHeaderValues . FILENAME ) ; }
Remove all Attributes that should be cleaned between two FileUpload in Mixed mode
14,070
private static boolean loadDataMultipartStandard ( ByteBuf undecodedChunk , String delimiter , HttpData httpData ) { final int startReaderIndex = undecodedChunk . readerIndex ( ) ; final int delimeterLength = delimiter . length ( ) ; int index = 0 ; int lastPosition = startReaderIndex ; byte prevByte = HttpConstants . LF ; boolean delimiterFound = false ; while ( undecodedChunk . isReadable ( ) ) { final byte nextByte = undecodedChunk . readByte ( ) ; if ( prevByte == HttpConstants . LF && nextByte == delimiter . codePointAt ( index ) ) { index ++ ; if ( delimeterLength == index ) { delimiterFound = true ; break ; } continue ; } lastPosition = undecodedChunk . readerIndex ( ) ; if ( nextByte == HttpConstants . LF ) { index = 0 ; lastPosition -= ( prevByte == HttpConstants . CR ) ? 2 : 1 ; } prevByte = nextByte ; } if ( prevByte == HttpConstants . CR ) { lastPosition -- ; } ByteBuf content = undecodedChunk . copy ( startReaderIndex , lastPosition - startReaderIndex ) ; try { httpData . addContent ( content , delimiterFound ) ; } catch ( IOException e ) { throw new ErrorDataDecoderException ( e ) ; } undecodedChunk . readerIndex ( lastPosition ) ; return delimiterFound ; }
Load the field value or file data from a Multipart request
14,071
private static boolean loadDataMultipart ( ByteBuf undecodedChunk , String delimiter , HttpData httpData ) { if ( ! undecodedChunk . hasArray ( ) ) { return loadDataMultipartStandard ( undecodedChunk , delimiter , httpData ) ; } final SeekAheadOptimize sao = new SeekAheadOptimize ( undecodedChunk ) ; final int startReaderIndex = undecodedChunk . readerIndex ( ) ; final int delimeterLength = delimiter . length ( ) ; int index = 0 ; int lastRealPos = sao . pos ; byte prevByte = HttpConstants . LF ; boolean delimiterFound = false ; while ( sao . pos < sao . limit ) { final byte nextByte = sao . bytes [ sao . pos ++ ] ; if ( prevByte == HttpConstants . LF && nextByte == delimiter . codePointAt ( index ) ) { index ++ ; if ( delimeterLength == index ) { delimiterFound = true ; break ; } continue ; } lastRealPos = sao . pos ; if ( nextByte == HttpConstants . LF ) { index = 0 ; lastRealPos -= ( prevByte == HttpConstants . CR ) ? 2 : 1 ; } prevByte = nextByte ; } if ( prevByte == HttpConstants . CR ) { lastRealPos -- ; } final int lastPosition = sao . getReadPosition ( lastRealPos ) ; final ByteBuf content = undecodedChunk . copy ( startReaderIndex , lastPosition - startReaderIndex ) ; try { httpData . addContent ( content , delimiterFound ) ; } catch ( IOException e ) { throw new ErrorDataDecoderException ( e ) ; } undecodedChunk . readerIndex ( lastPosition ) ; return delimiterFound ; }
Load the field value from a Multipart request
14,072
private static String cleanString ( String field ) { int size = field . length ( ) ; StringBuilder sb = new StringBuilder ( size ) ; for ( int i = 0 ; i < size ; i ++ ) { char nextChar = field . charAt ( i ) ; switch ( nextChar ) { case HttpConstants . COLON : case HttpConstants . COMMA : case HttpConstants . EQUALS : case HttpConstants . SEMICOLON : case HttpConstants . HT : sb . append ( HttpConstants . SP_CHAR ) ; break ; case HttpConstants . DOUBLE_QUOTE : break ; default : sb . append ( nextChar ) ; break ; } } return sb . toString ( ) . trim ( ) ; }
Clean the String from any unallowed character
14,073
private boolean skipOneLine ( ) { if ( ! undecodedChunk . isReadable ( ) ) { return false ; } byte nextByte = undecodedChunk . readByte ( ) ; if ( nextByte == HttpConstants . CR ) { if ( ! undecodedChunk . isReadable ( ) ) { undecodedChunk . readerIndex ( undecodedChunk . readerIndex ( ) - 1 ) ; return false ; } nextByte = undecodedChunk . readByte ( ) ; if ( nextByte == HttpConstants . LF ) { return true ; } undecodedChunk . readerIndex ( undecodedChunk . readerIndex ( ) - 2 ) ; return false ; } if ( nextByte == HttpConstants . LF ) { return true ; } undecodedChunk . readerIndex ( undecodedChunk . readerIndex ( ) - 1 ) ; return false ; }
Skip one empty line
14,074
private static String [ ] splitMultipartHeader ( String sb ) { ArrayList < String > headers = new ArrayList < String > ( 1 ) ; int nameStart ; int nameEnd ; int colonEnd ; int valueStart ; int valueEnd ; nameStart = HttpPostBodyUtil . findNonWhitespace ( sb , 0 ) ; for ( nameEnd = nameStart ; nameEnd < sb . length ( ) ; nameEnd ++ ) { char ch = sb . charAt ( nameEnd ) ; if ( ch == ':' || Character . isWhitespace ( ch ) ) { break ; } } for ( colonEnd = nameEnd ; colonEnd < sb . length ( ) ; colonEnd ++ ) { if ( sb . charAt ( colonEnd ) == ':' ) { colonEnd ++ ; break ; } } valueStart = HttpPostBodyUtil . findNonWhitespace ( sb , colonEnd ) ; valueEnd = HttpPostBodyUtil . findEndOfString ( sb ) ; headers . add ( sb . substring ( nameStart , nameEnd ) ) ; String svalue = ( valueStart >= valueEnd ) ? StringUtil . EMPTY_STRING : sb . substring ( valueStart , valueEnd ) ; String [ ] values ; if ( svalue . indexOf ( ';' ) >= 0 ) { values = splitMultipartHeaderValues ( svalue ) ; } else { values = svalue . split ( "," ) ; } for ( String value : values ) { headers . add ( value . trim ( ) ) ; } String [ ] array = new String [ headers . size ( ) ] ; for ( int i = 0 ; i < headers . size ( ) ; i ++ ) { array [ i ] = headers . get ( i ) ; } return array ; }
Split one header in Multipart
14,075
private static String [ ] splitMultipartHeaderValues ( String svalue ) { List < String > values = InternalThreadLocalMap . get ( ) . arrayList ( 1 ) ; boolean inQuote = false ; boolean escapeNext = false ; int start = 0 ; for ( int i = 0 ; i < svalue . length ( ) ; i ++ ) { char c = svalue . charAt ( i ) ; if ( inQuote ) { if ( escapeNext ) { escapeNext = false ; } else { if ( c == '\\' ) { escapeNext = true ; } else if ( c == '"' ) { inQuote = false ; } } } else { if ( c == '"' ) { inQuote = true ; } else if ( c == ';' ) { values . add ( svalue . substring ( start , i ) ) ; start = i + 1 ; } } } values . add ( svalue . substring ( start ) ) ; return values . toArray ( new String [ 0 ] ) ; }
Split one header value in Multipart
14,076
static void handle ( ChannelHandlerContext ctx , Http2Connection connection , Http2FrameListener listener , FullHttpMessage message ) throws Http2Exception { try { int streamId = getStreamId ( connection , message . headers ( ) ) ; Http2Stream stream = connection . stream ( streamId ) ; if ( stream == null ) { stream = connection . remote ( ) . createStream ( streamId , false ) ; } message . headers ( ) . set ( HttpConversionUtil . ExtensionHeaderNames . SCHEME . text ( ) , HttpScheme . HTTP . name ( ) ) ; Http2Headers messageHeaders = HttpConversionUtil . toHttp2Headers ( message , true ) ; boolean hasContent = message . content ( ) . isReadable ( ) ; boolean hasTrailers = ! message . trailingHeaders ( ) . isEmpty ( ) ; listener . onHeadersRead ( ctx , streamId , messageHeaders , 0 , ! ( hasContent || hasTrailers ) ) ; if ( hasContent ) { listener . onDataRead ( ctx , streamId , message . content ( ) , 0 , ! hasTrailers ) ; } if ( hasTrailers ) { Http2Headers headers = HttpConversionUtil . toHttp2Headers ( message . trailingHeaders ( ) , true ) ; listener . onHeadersRead ( ctx , streamId , headers , 0 , true ) ; } stream . closeRemoteSide ( ) ; } finally { message . release ( ) ; } }
control but there is not yet an API for signaling that .
14,077
private void writeSymbolMap ( ByteBuf out ) { Bzip2BitWriter writer = this . writer ; final boolean [ ] blockValuesPresent = this . blockValuesPresent ; final boolean [ ] condensedInUse = new boolean [ 16 ] ; for ( int i = 0 ; i < condensedInUse . length ; i ++ ) { for ( int j = 0 , k = i << 4 ; j < HUFFMAN_SYMBOL_RANGE_SIZE ; j ++ , k ++ ) { if ( blockValuesPresent [ k ] ) { condensedInUse [ i ] = true ; } } } for ( boolean isCondensedInUse : condensedInUse ) { writer . writeBoolean ( out , isCondensedInUse ) ; } for ( int i = 0 ; i < condensedInUse . length ; i ++ ) { if ( condensedInUse [ i ] ) { for ( int j = 0 , k = i << 4 ; j < HUFFMAN_SYMBOL_RANGE_SIZE ; j ++ , k ++ ) { writer . writeBoolean ( out , blockValuesPresent [ k ] ) ; } } } }
Write the Huffman symbol to output byte map .
14,078
private void writeRun ( final int value , int runLength ) { final int blockLength = this . blockLength ; final byte [ ] block = this . block ; blockValuesPresent [ value ] = true ; crc . updateCRC ( value , runLength ) ; final byte byteValue = ( byte ) value ; switch ( runLength ) { case 1 : block [ blockLength ] = byteValue ; this . blockLength = blockLength + 1 ; break ; case 2 : block [ blockLength ] = byteValue ; block [ blockLength + 1 ] = byteValue ; this . blockLength = blockLength + 2 ; break ; case 3 : block [ blockLength ] = byteValue ; block [ blockLength + 1 ] = byteValue ; block [ blockLength + 2 ] = byteValue ; this . blockLength = blockLength + 3 ; break ; default : runLength -= 4 ; blockValuesPresent [ runLength ] = true ; block [ blockLength ] = byteValue ; block [ blockLength + 1 ] = byteValue ; block [ blockLength + 2 ] = byteValue ; block [ blockLength + 3 ] = byteValue ; block [ blockLength + 4 ] = ( byte ) runLength ; this . blockLength = blockLength + 5 ; break ; } }
Writes an RLE run to the block array updating the block CRC and present values array as required .
14,079
boolean write ( final int value ) { if ( blockLength > blockLengthLimit ) { return false ; } final int rleCurrentValue = this . rleCurrentValue ; final int rleLength = this . rleLength ; if ( rleLength == 0 ) { this . rleCurrentValue = value ; this . rleLength = 1 ; } else if ( rleCurrentValue != value ) { writeRun ( rleCurrentValue & 0xff , rleLength ) ; this . rleCurrentValue = value ; this . rleLength = 1 ; } else { if ( rleLength == 254 ) { writeRun ( rleCurrentValue & 0xff , 255 ) ; this . rleLength = 0 ; } else { this . rleLength = rleLength + 1 ; } } return true ; }
Writes a byte to the block accumulating to an RLE run where possible .
14,080
int write ( final ByteBuf buffer , int offset , int length ) { int index = buffer . forEachByte ( offset , length , writeProcessor ) ; return index == - 1 ? length : index - offset ; }
Writes an array to the block .
14,081
void close ( ByteBuf out ) { if ( rleLength > 0 ) { writeRun ( rleCurrentValue & 0xff , rleLength ) ; } block [ blockLength ] = block [ 0 ] ; Bzip2DivSufSort divSufSort = new Bzip2DivSufSort ( block , bwtBlock , blockLength ) ; int bwtStartPointer = divSufSort . bwt ( ) ; Bzip2BitWriter writer = this . writer ; writer . writeBits ( out , 24 , BLOCK_HEADER_MAGIC_1 ) ; writer . writeBits ( out , 24 , BLOCK_HEADER_MAGIC_2 ) ; writer . writeInt ( out , crc . getCRC ( ) ) ; writer . writeBoolean ( out , false ) ; writer . writeBits ( out , 24 , bwtStartPointer ) ; writeSymbolMap ( out ) ; Bzip2MTFAndRLE2StageEncoder mtfEncoder = new Bzip2MTFAndRLE2StageEncoder ( bwtBlock , blockLength , blockValuesPresent ) ; mtfEncoder . encode ( ) ; Bzip2HuffmanStageEncoder huffmanEncoder = new Bzip2HuffmanStageEncoder ( writer , mtfEncoder . mtfBlock ( ) , mtfEncoder . mtfLength ( ) , mtfEncoder . mtfAlphabetSize ( ) , mtfEncoder . mtfSymbolFrequencies ( ) ) ; huffmanEncoder . encode ( out ) ; }
Compresses and writes out the block .
14,082
private void processStreamUpload ( final UploadStream req ) { assert ( req . body ( ) == null ) ; try { RpcResponseCallback callback = new RpcResponseCallback ( ) { public void onSuccess ( ByteBuffer response ) { respond ( new RpcResponse ( req . requestId , new NioManagedBuffer ( response ) ) ) ; } public void onFailure ( Throwable e ) { respond ( new RpcFailure ( req . requestId , Throwables . getStackTraceAsString ( e ) ) ) ; } } ; TransportFrameDecoder frameDecoder = ( TransportFrameDecoder ) channel . pipeline ( ) . get ( TransportFrameDecoder . HANDLER_NAME ) ; ByteBuffer meta = req . meta . nioByteBuffer ( ) ; StreamCallbackWithID streamHandler = rpcHandler . receiveStream ( reverseClient , meta , callback ) ; if ( streamHandler == null ) { throw new NullPointerException ( "rpcHandler returned a null streamHandler" ) ; } StreamCallbackWithID wrappedCallback = new StreamCallbackWithID ( ) { public void onData ( String streamId , ByteBuffer buf ) throws IOException { streamHandler . onData ( streamId , buf ) ; } public void onComplete ( String streamId ) throws IOException { try { streamHandler . onComplete ( streamId ) ; callback . onSuccess ( ByteBuffer . allocate ( 0 ) ) ; } catch ( Exception ex ) { IOException ioExc = new IOException ( "Failure post-processing complete stream;" + " failing this rpc and leaving channel active" , ex ) ; callback . onFailure ( ioExc ) ; streamHandler . onFailure ( streamId , ioExc ) ; } } public void onFailure ( String streamId , Throwable cause ) throws IOException { callback . onFailure ( new IOException ( "Destination failed while reading stream" , cause ) ) ; streamHandler . onFailure ( streamId , cause ) ; } public String getID ( ) { return streamHandler . getID ( ) ; } } ; if ( req . bodyByteCount > 0 ) { StreamInterceptor < RequestMessage > interceptor = new StreamInterceptor < > ( this , wrappedCallback . getID ( ) , req . bodyByteCount , wrappedCallback ) ; frameDecoder . 
setInterceptor ( interceptor ) ; } else { wrappedCallback . onComplete ( wrappedCallback . getID ( ) ) ; } } catch ( Exception e ) { logger . error ( "Error while invoking RpcHandler#receive() on RPC id " + req . requestId , e ) ; respond ( new RpcFailure ( req . requestId , Throwables . getStackTraceAsString ( e ) ) ) ; channel . pipeline ( ) . fireExceptionCaught ( e ) ; } finally { req . meta . release ( ) ; } }
Handle a request from the client to upload a stream of data .
14,083
private ChannelFuture respond ( Encodable result ) { SocketAddress remoteAddress = channel . remoteAddress ( ) ; return channel . writeAndFlush ( result ) . addListener ( future -> { if ( future . isSuccess ( ) ) { logger . trace ( "Sent result {} to client {}" , result , remoteAddress ) ; } else { logger . error ( String . format ( "Error sending result %s to %s; closing connection" , result , remoteAddress ) , future . cause ( ) ) ; channel . close ( ) ; } } ) ; }
Responds to a single message with some Encodable object . If a failure occurs while sending it will be logged and the channel closed .
14,084
private void receive ( ) { try { Socket socket = null ; BufferedReader reader = null ; try { socket = new Socket ( host , port ) ; reader = new BufferedReader ( new InputStreamReader ( socket . getInputStream ( ) , StandardCharsets . UTF_8 ) ) ; String userInput ; while ( ! isStopped ( ) && ( userInput = reader . readLine ( ) ) != null ) { System . out . println ( "Received data '" + userInput + "'" ) ; store ( userInput ) ; } } finally { Closeables . close ( reader , true ) ; Closeables . close ( socket , true ) ; } restart ( "Trying to connect again" ) ; } catch ( ConnectException ce ) { restart ( "Could not connect" , ce ) ; } catch ( Throwable t ) { restart ( "Error receiving data" , t ) ; } }
Create a socket connection and receive data until receiver is stopped
14,085
public static StorageLevel create ( boolean useDisk , boolean useMemory , boolean useOffHeap , boolean deserialized , int replication ) { return StorageLevel . apply ( useDisk , useMemory , useOffHeap , deserialized , replication ) ; }
Create a new StorageLevel object .
14,086
public static ColumnarBatch toBatch ( StructType schema , MemoryMode memMode , Iterator < Row > row ) { int capacity = 4 * 1024 ; WritableColumnVector [ ] columnVectors ; if ( memMode == MemoryMode . OFF_HEAP ) { columnVectors = OffHeapColumnVector . allocateColumns ( capacity , schema ) ; } else { columnVectors = OnHeapColumnVector . allocateColumns ( capacity , schema ) ; } int n = 0 ; while ( row . hasNext ( ) ) { Row r = row . next ( ) ; for ( int i = 0 ; i < schema . fields ( ) . length ; i ++ ) { appendValue ( columnVectors [ i ] , schema . fields ( ) [ i ] . dataType ( ) , r , i ) ; } n ++ ; } ColumnarBatch batch = new ColumnarBatch ( columnVectors ) ; batch . setNumRows ( n ) ; return batch ; }
Converts an iterator of rows into a single ColumnBatch .
14,087
public void registerExecutor ( String appId , String execId , ExecutorShuffleInfo executorInfo ) { AppExecId fullId = new AppExecId ( appId , execId ) ; logger . info ( "Registered executor {} with {}" , fullId , executorInfo ) ; if ( ! knownManagers . contains ( executorInfo . shuffleManager ) ) { throw new UnsupportedOperationException ( "Unsupported shuffle manager of executor: " + executorInfo ) ; } try { if ( db != null ) { byte [ ] key = dbAppExecKey ( fullId ) ; byte [ ] value = mapper . writeValueAsString ( executorInfo ) . getBytes ( StandardCharsets . UTF_8 ) ; db . put ( key , value ) ; } } catch ( Exception e ) { logger . error ( "Error saving registered executors" , e ) ; } executors . put ( fullId , executorInfo ) ; }
Registers a new Executor with all the configuration we need to find its shuffle files .
14,088
public void applicationRemoved ( String appId , boolean cleanupLocalDirs ) { logger . info ( "Application {} removed, cleanupLocalDirs = {}" , appId , cleanupLocalDirs ) ; Iterator < Map . Entry < AppExecId , ExecutorShuffleInfo > > it = executors . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Map . Entry < AppExecId , ExecutorShuffleInfo > entry = it . next ( ) ; AppExecId fullId = entry . getKey ( ) ; final ExecutorShuffleInfo executor = entry . getValue ( ) ; if ( appId . equals ( fullId . appId ) ) { it . remove ( ) ; if ( db != null ) { try { db . delete ( dbAppExecKey ( fullId ) ) ; } catch ( IOException e ) { logger . error ( "Error deleting {} from executor state db" , appId , e ) ; } } if ( cleanupLocalDirs ) { logger . info ( "Cleaning up executor {}'s {} local dirs" , fullId , executor . localDirs . length ) ; directoryCleaner . execute ( ( ) -> deleteExecutorDirs ( executor . localDirs ) ) ; } } } }
Removes our metadata of all executors registered for the given application and optionally also deletes the local directories associated with the executors of that application in a separate thread .
14,089
public void executorRemoved ( String executorId , String appId ) { logger . info ( "Clean up non-shuffle files associated with the finished executor {}" , executorId ) ; AppExecId fullId = new AppExecId ( appId , executorId ) ; final ExecutorShuffleInfo executor = executors . get ( fullId ) ; if ( executor == null ) { logger . info ( "Executor is not registered (appId={}, execId={})" , appId , executorId ) ; } else { logger . info ( "Cleaning up non-shuffle files in executor {}'s {} local dirs" , fullId , executor . localDirs . length ) ; directoryCleaner . execute ( ( ) -> deleteNonShuffleFiles ( executor . localDirs ) ) ; } }
Removes all the non - shuffle files in any local directories associated with the finished executor .
14,090
private void deleteExecutorDirs ( String [ ] dirs ) { for ( String localDir : dirs ) { try { JavaUtils . deleteRecursively ( new File ( localDir ) ) ; logger . debug ( "Successfully cleaned up directory: {}" , localDir ) ; } catch ( Exception e ) { logger . error ( "Failed to delete directory: " + localDir , e ) ; } } }
Synchronously deletes each directory one at a time . Should be executed in its own thread as this may take a long time .
14,091
private void deleteNonShuffleFiles ( String [ ] dirs ) { FilenameFilter filter = new FilenameFilter ( ) { public boolean accept ( File dir , String name ) { return ! name . endsWith ( ".index" ) && ! name . endsWith ( ".data" ) ; } } ; for ( String localDir : dirs ) { try { JavaUtils . deleteRecursively ( new File ( localDir ) , filter ) ; logger . debug ( "Successfully cleaned up non-shuffle files in directory: {}" , localDir ) ; } catch ( Exception e ) { logger . error ( "Failed to delete non-shuffle files in directory: " + localDir , e ) ; } } }
Synchronously deletes non - shuffle files in each directory recursively . Should be executed in its own thread as this may take a long time .
14,092
private ManagedBuffer getSortBasedShuffleBlockData ( ExecutorShuffleInfo executor , int shuffleId , int mapId , int reduceId ) { File indexFile = getFile ( executor . localDirs , executor . subDirsPerLocalDir , "shuffle_" + shuffleId + "_" + mapId + "_0.index" ) ; try { ShuffleIndexInformation shuffleIndexInformation = shuffleIndexCache . get ( indexFile ) ; ShuffleIndexRecord shuffleIndexRecord = shuffleIndexInformation . getIndex ( reduceId ) ; return new FileSegmentManagedBuffer ( conf , getFile ( executor . localDirs , executor . subDirsPerLocalDir , "shuffle_" + shuffleId + "_" + mapId + "_0.data" ) , shuffleIndexRecord . getOffset ( ) , shuffleIndexRecord . getLength ( ) ) ; } catch ( ExecutionException e ) { throw new RuntimeException ( "Failed to open file: " + indexFile , e ) ; } }
Sort - based shuffle data uses an index called shuffle_ShuffleId_MapId_0 . index into a data file called shuffle_ShuffleId_MapId_0 . data . This logic is from IndexShuffleBlockResolver and the block id format is from ShuffleDataBlockId and ShuffleIndexBlockId .
14,093
public void insertRecord ( long recordPointer , int partitionId ) { if ( ! hasSpaceForAnotherRecord ( ) ) { throw new IllegalStateException ( "There is no space for new record" ) ; } array . set ( pos , PackedRecordPointer . packPointer ( recordPointer , partitionId ) ) ; pos ++ ; }
Inserts a record to be sorted .
14,094
public ShuffleSorterIterator getSortedIterator ( ) { int offset = 0 ; if ( useRadixSort ) { offset = RadixSort . sort ( array , pos , PackedRecordPointer . PARTITION_ID_START_BYTE_INDEX , PackedRecordPointer . PARTITION_ID_END_BYTE_INDEX , false , false ) ; } else { MemoryBlock unused = new MemoryBlock ( array . getBaseObject ( ) , array . getBaseOffset ( ) + pos * 8L , ( array . size ( ) - pos ) * 8L ) ; LongArray buffer = new LongArray ( unused ) ; Sorter < PackedRecordPointer , LongArray > sorter = new Sorter < > ( new ShuffleSortDataFormat ( buffer ) ) ; sorter . sort ( array , 0 , pos , SORT_COMPARATOR ) ; } return new ShuffleSorterIterator ( pos , array , offset ) ; }
Return an iterator over record pointers in sorted order .
14,095
private void failOutstandingRequests ( Throwable cause ) { for ( Map . Entry < StreamChunkId , ChunkReceivedCallback > entry : outstandingFetches . entrySet ( ) ) { try { entry . getValue ( ) . onFailure ( entry . getKey ( ) . chunkIndex , cause ) ; } catch ( Exception e ) { logger . warn ( "ChunkReceivedCallback.onFailure throws exception" , e ) ; } } for ( Map . Entry < Long , RpcResponseCallback > entry : outstandingRpcs . entrySet ( ) ) { try { entry . getValue ( ) . onFailure ( cause ) ; } catch ( Exception e ) { logger . warn ( "RpcResponseCallback.onFailure throws exception" , e ) ; } } for ( Pair < String , StreamCallback > entry : streamCallbacks ) { try { entry . getValue ( ) . onFailure ( entry . getKey ( ) , cause ) ; } catch ( Exception e ) { logger . warn ( "StreamCallback.onFailure throws exception" , e ) ; } } outstandingFetches . clear ( ) ; outstandingRpcs . clear ( ) ; streamCallbacks . clear ( ) ; }
Fire the failure callback for all outstanding requests . This is called when we have an uncaught exception or pre - mature connection termination .
14,096
protected void serviceInit ( Configuration conf ) throws Exception { _conf = conf ; boolean stopOnFailure = conf . getBoolean ( STOP_ON_FAILURE_KEY , DEFAULT_STOP_ON_FAILURE ) ; try { if ( _recoveryPath != null ) { registeredExecutorFile = initRecoveryDb ( RECOVERY_FILE_NAME ) ; } TransportConf transportConf = new TransportConf ( "shuffle" , new HadoopConfigProvider ( conf ) ) ; blockHandler = new ExternalShuffleBlockHandler ( transportConf , registeredExecutorFile ) ; List < TransportServerBootstrap > bootstraps = Lists . newArrayList ( ) ; boolean authEnabled = conf . getBoolean ( SPARK_AUTHENTICATE_KEY , DEFAULT_SPARK_AUTHENTICATE ) ; if ( authEnabled ) { secretManager = new ShuffleSecretManager ( ) ; if ( _recoveryPath != null ) { loadSecretsFromDb ( ) ; } bootstraps . add ( new AuthServerBootstrap ( transportConf , secretManager ) ) ; } int port = conf . getInt ( SPARK_SHUFFLE_SERVICE_PORT_KEY , DEFAULT_SPARK_SHUFFLE_SERVICE_PORT ) ; transportContext = new TransportContext ( transportConf , blockHandler ) ; shuffleServer = transportContext . createServer ( port , bootstraps ) ; port = shuffleServer . getPort ( ) ; boundPort = port ; String authEnabledString = authEnabled ? "enabled" : "not enabled" ; blockHandler . getAllMetrics ( ) . getMetrics ( ) . put ( "numRegisteredConnections" , shuffleServer . getRegisteredConnections ( ) ) ; YarnShuffleServiceMetrics serviceMetrics = new YarnShuffleServiceMetrics ( blockHandler . getAllMetrics ( ) ) ; MetricsSystemImpl metricsSystem = ( MetricsSystemImpl ) DefaultMetricsSystem . instance ( ) ; metricsSystem . register ( "sparkShuffleService" , "Metrics on the Spark Shuffle Service" , serviceMetrics ) ; logger . info ( "Registered metrics with Hadoop's DefaultMetricsSystem" ) ; logger . info ( "Started YARN shuffle service for Spark on port {}. " + "Authentication is {}. 
Registered executor file is {}" , port , authEnabledString , registeredExecutorFile ) ; } catch ( Exception e ) { if ( stopOnFailure ) { throw e ; } else { noteFailure ( e ) ; } } }
Start the shuffle server with the given configuration .
14,097
protected void serviceStop ( ) { try { if ( shuffleServer != null ) { shuffleServer . close ( ) ; } if ( transportContext != null ) { transportContext . close ( ) ; } if ( blockHandler != null ) { blockHandler . close ( ) ; } if ( db != null ) { db . close ( ) ; } } catch ( Exception e ) { logger . error ( "Exception when stopping service" , e ) ; } }
Close the shuffle server to clean up any associated state .
14,098
protected File initRecoveryDb ( String dbName ) { Preconditions . checkNotNull ( _recoveryPath , "recovery path should not be null if NM recovery is enabled" ) ; File recoveryFile = new File ( _recoveryPath . toUri ( ) . getPath ( ) , dbName ) ; if ( recoveryFile . exists ( ) ) { return recoveryFile ; } String [ ] localDirs = _conf . getTrimmedStrings ( "yarn.nodemanager.local-dirs" ) ; for ( String dir : localDirs ) { File f = new File ( new Path ( dir ) . toUri ( ) . getPath ( ) , dbName ) ; if ( f . exists ( ) ) { Path newLoc = new Path ( _recoveryPath , dbName ) ; Path copyFrom = new Path ( f . toURI ( ) ) ; if ( ! newLoc . equals ( copyFrom ) ) { logger . info ( "Moving " + copyFrom + " to: " + newLoc ) ; try { FileSystem fs = FileSystem . getLocal ( _conf ) ; fs . rename ( copyFrom , newLoc ) ; } catch ( Exception e ) { logger . error ( "Failed to move recovery file {} to the path {}" , dbName , _recoveryPath . toString ( ) , e ) ; } } return new File ( newLoc . toUri ( ) . getPath ( ) ) ; } } return new File ( _recoveryPath . toUri ( ) . getPath ( ) , dbName ) ; }
Figure out the recovery path and handle moving the DB if YARN NM recovery gets enabled and DB exists in the local dir of NM by old version of shuffle service .
14,099
private static List < String > buildCommand ( AbstractCommandBuilder builder , Map < String , String > env , boolean printLaunchCommand ) throws IOException , IllegalArgumentException { List < String > cmd = builder . buildCommand ( env ) ; if ( printLaunchCommand ) { System . err . println ( "Spark Command: " + join ( " " , cmd ) ) ; System . err . println ( "========================================" ) ; } return cmd ; }
Prepare spark commands with the appropriate command builder . If printLaunchCommand is set then the commands will be printed to the stderr .