    switch (currentState) {
    case SKIP_CONTROL_CHARS:
        // Fall-through
    case READ_INITIAL: try {
        AppendableCharSequence line = lineParser.parse(buffer);
        if (line == null) {
            return;
        }

        String[] initialLine = splitInitialLine(line);
        if (initialLine.length < 3) {
            // Invalid initial line - ignore.
            currentState = State.SKIP_CONTROL_CHARS;
            return;
        }
        message = createMessage(initialLine);
        currentState = State.READ_HEADER;
        // fall-through
    } catch (Exception e) {
        out.add(invalidMessage(buffer, e));
        return;
    }
    case READ_HEADER: try {
        State nextState = readHeaders(buffer);
        if (nextState == null) {
            return;
        }
        currentState = nextState;
        switch (nextState) {
        case SKIP_CONTROL_CHARS:
            // fast-path
            // No content is expected.
            out.add(message);
            out.add(LastHttpContent.EMPTY_LAST_CONTENT);
            resetNow();
            return;
        case READ_CHUNK_SIZE:
            if (!chunkedSupported) {
                throw new IllegalArgumentException("Chunked messages not supported");
            }
            // Chunked encoding - generate HttpMessage first. HttpChunks will follow.
            out.add(message);
            return;
        default:
            /**
             * <a href="https://tools.ietf.org/html/rfc7230#section-3.3.3">RFC 7230, 3.3.3</a> states that if a
             * request does not have either a transfer-encoding or a content-length header then the message body
             * length is 0. However, for a response the body length is the number of octets received prior to the
             * server closing the connection. So we treat this as variable length chunked encoding.
             */
            long contentLength = contentLength();
            if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) {
                out.add(message);
                out.add(LastHttpContent.EMPTY_LAST_CONTENT);
                resetNow();
                return;
            }
            if (nextState == State.READ_FIXED_LENGTH_CONTENT) {
                // chunkSize will be decreased as the READ_FIXED_LENGTH_CONTENT state reads data chunk by chunk.
                chunkSize = contentLength;
            }

            // We return here; this forces decode to be called again, where we will decode the content.
            return;
        }
    } catch (Exception e) {
        out.add(invalidMessage(buffer, e));
        return;
    }
    case READ_VARIABLE_LENGTH_CONTENT: {
        // Keep reading data as a chunk until the end of connection is reached.
        int toRead = Math.min(buffer.readableBytes(), maxChunkSize);
        if (toRead > 0) {
            ByteBuf content = buffer.readRetainedSlice(toRead);
            out.add(new DefaultHttpContent(content));
        }
        return;
    }
    case READ_FIXED_LENGTH_CONTENT: {
        int readLimit = buffer.readableBytes();
        // Check if the buffer is readable first as we use the readable byte count
        // to create the HttpChunk. This is needed as otherwise we may end up
        // creating an HttpChunk instance that contains an empty buffer and so is
        // handled like it is the last HttpChunk.
        //
        // See https://github.com/netty/netty/issues/433
        if (readLimit == 0) {
            return;
        }
        // Read no more than maxChunkSize bytes and no more than the remaining fixed-length body.
        int toRead = Math.min(readLimit, maxChunkSize);
        if (toRead > chunkSize) {
            toRead = (int) chunkSize;
        }
        ByteBuf content = buffer.readRetainedSlice(toRead);
        chunkSize -= toRead;

        if (chunkSize == 0) {
            // Read all content.
            out.add(new DefaultLastHttpContent(content, validateHeaders));
            resetNow();
        } else {
            out.add(new DefaultHttpContent(content));
        }
        return;
    }
    /**
     * Everything else after this point takes care of reading chunked content. Basically: read chunk size,
     * read chunk, read and ignore the CRLF, and repeat until the chunk size is 0.
     */
    case READ_CHUNK_SIZE: try {
        AppendableCharSequence line = lineParser.parse(buffer);
        if (line == null) {
            return;
        }
        int chunkSize = getChunkSize(line.toString());
        this.chunkSize = chunkSize;
        if (chunkSize == 0) {
            currentState = State.READ_CHUNK_FOOTER;
            return;
        }
        currentState = State.READ_CHUNKED_CONTENT;
        // fall-through
    } catch (Exception e) {
        out.add(invalidChunk(buffer, e));
        return;
    }
    case READ_CHUNKED_CONTENT: {
        // chunkSize was just parsed from an int, so the cast below is safe.
        assert chunkSize <= Integer.MAX_VALUE;
        int toRead = Math.min((int) chunkSize, maxChunkSize);
        if (!allowPartialChunks && buffer.readableBytes() < toRead) {
            return;
        }
        toRead = Math.min(toRead, buffer.readableBytes());
        if (toRead == 0) {
            return;
        }
        HttpContent chunk = new DefaultHttpContent(buffer.readRetainedSlice(toRead));
        chunkSize -= toRead;
        out.add(chunk);

        if (chunkSize != 0) {
            return;
        }
        currentState = State.READ_CHUNK_DELIMITER;
        // fall-through
    }
    case READ_CHUNK_DELIMITER: {
        final int wIdx = buffer.writerIndex();
        int rIdx = buffer.readerIndex();
        while (wIdx > rIdx) {
            byte next = buffer.getByte(rIdx++);
            if (next == HttpConstants.LF) {
                currentState = State.READ_CHUNK_SIZE;
                break;
            }
        }
        buffer.readerIndex(rIdx);
        return;
    }
    case READ_CHUNK_FOOTER: try {
        LastHttpContent trailer = readTrailingHeaders(buffer);
        if (trailer == null) {
            return;
        }
        out.add(trailer);
        resetNow();
        return;
    } catch (Exception e) {
        out.add(invalidChunk(buffer, e));
        return;
    }
    case BAD_MESSAGE: {
        // Keep discarding until disconnection.
        buffer.skipBytes(buffer.readableBytes());
        break;
    }
    case UPGRADED: {
        int readableBytes = buffer.readableBytes();
        if (readableBytes > 0) {
            // Keep on consuming, as otherwise we may trigger a DecoderException;
            // another handler will replace this codec with the upgraded protocol codec
            // to take over the traffic at some point.
            // See https://github.com/netty/netty/issues/2173
            out.add(buffer.readBytes(readableBytes));
        }
        break;
    }
    default:
        break;
    }
}
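The state machine above turns raw bytes into at most three kinds of objects per message: an HttpMessage from READ_INITIAL/READ_HEADER, zero or more HttpContent chunks, and a LastHttpContent (possibly carrying trailers) that ends the message. The following is a minimal sketch of that behaviour, assuming Netty 4.1; EmbeddedChannel and HttpRequestDecoder are real Netty classes, while the class name ChunkedDecodeSketch and the sample request are made up for illustration.

import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.util.CharsetUtil;

public final class ChunkedDecodeSketch {
    public static void main(String[] args) {
        EmbeddedChannel ch = new EmbeddedChannel(new HttpRequestDecoder());

        // READ_INITIAL/READ_HEADER produce the HttpRequest; READ_CHUNK_SIZE,
        // READ_CHUNKED_CONTENT and READ_CHUNK_FOOTER produce the content objects.
        String raw = "POST /upload HTTP/1.1\r\n"
                + "Host: example.com\r\n"
                + "Transfer-Encoding: chunked\r\n"
                + "\r\n"
                + "5\r\nhello\r\n"
                + "0\r\n\r\n";
        ch.writeInbound(Unpooled.copiedBuffer(raw, CharsetUtil.US_ASCII));

        HttpRequest request = ch.readInbound();     // from out.add(message)
        HttpContent chunk = ch.readInbound();       // one chunk of body data
        LastHttpContent last = ch.readInbound();    // end of the message (possibly with trailers)

        System.out.println(request.uri() + " -> " + chunk.content().toString(CharsetUtil.US_ASCII));
        chunk.release();
        last.release();
    }
}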
    // Bypass the encoder in case of an empty buffer, so that the following idiom works:
    //
    //     ch.write(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
    //
    // See https://github.com/netty/netty/issues/2983 for more information.
    if (msg instanceof ByteBuf) {
        final ByteBuf potentialEmptyBuf = (ByteBuf) msg;
        if (!potentialEmptyBuf.isReadable()) {
            out.add(potentialEmptyBuf.retain());
            return;
        }
    }
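As an aside, the idiom the comment above refers to normally lives in an application handler rather than in the encoder itself. A hedged sketch, assuming Netty 4.1; the handler name CloseAfterFlushSketch is made up:

import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.ReferenceCountUtil;

public final class CloseAfterFlushSketch extends ChannelInboundHandlerAdapter {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        ReferenceCountUtil.release(msg);
        // ... write the actual response here ...
        // The empty buffer passes through the encoder untouched (see the bypass above) and its
        // future completes once everything written before it has been flushed, so CLOSE fires last.
        ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE);
    }
}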
    if (msg instanceof HttpContent || msg instanceof ByteBuf || msg instanceof FileRegion) {
        switch (state) {
        case ST_INIT:
            throw new IllegalStateException("unexpected message type: "
                    + StringUtil.simpleClassName(msg) + ", state: " + state);
        case ST_CONTENT_NON_CHUNK:
            final long contentLength = contentLength(msg);
            if (contentLength > 0) {
                if (buf != null && buf.writableBytes() >= contentLength && msg instanceof HttpContent) {
                    // merge into other buffer for performance reasons
                    buf.writeBytes(((HttpContent) msg).content());
                    out.add(buf);
                } else {
                    if (buf != null) {
                        out.add(buf);
                    }
                    out.add(encodeAndRetain(msg));
                }
                if (msg instanceof LastHttpContent) {
                    state = ST_INIT;
                }

                break;
            }
            // fall-through!
        case ST_CONTENT_ALWAYS_EMPTY:
            if (buf != null) {
                // We allocated a buffer so add it now.
                out.add(buf);
            } else {
                // Need to produce some output, otherwise an IllegalStateException will be thrown
                // because we did not write anything.
                // It's ok to just write an EMPTY_BUFFER: if there are reference count issues they will
                // still be propagated, as the caller of encode(...) will release the original buffer.
                // Writing an empty buffer does not actually put anything on the wire, so if there is a
                // user error with msg it will not be visible externally.
                out.add(Unpooled.EMPTY_BUFFER);
            }
            break;
        case ST_CONTENT_CHUNK:
            if (buf != null) {
                // We allocated a buffer so add it now.
                out.add(buf);
            }
            encodeChunkedContent(ctx, msg, contentLength(msg), out);
            break;
        default:
            throw new Error();
        }

        if (msg instanceof LastHttpContent) {
            state = ST_INIT;
        }
    } else if (buf != null) {
        out.add(buf);
    }
}
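For completeness, a small sketch of the encoder side, again assuming Netty 4.1 and using an EmbeddedChannel purely for illustration (the class name EncodeSketch is made up). A response carrying a Content-Length header goes through the ST_CONTENT_NON_CHUNK branch above, where the header buffer and the body may be merged into a single output ByteBuf.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;
import io.netty.util.CharsetUtil;

public final class EncodeSketch {
    public static void main(String[] args) {
        EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseEncoder());

        FullHttpResponse response = new DefaultFullHttpResponse(
                HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
                Unpooled.copiedBuffer("hello", CharsetUtil.US_ASCII));
        response.headers().set(HttpHeaderNames.CONTENT_LENGTH, response.content().readableBytes());

        // With Content-Length set (and no chunked transfer-encoding) the encoder takes the
        // ST_CONTENT_NON_CHUNK branch; for a full message the body is typically merged into buf.
        ch.writeOutbound(response);

        ByteBuf encoded;
        while ((encoded = ch.readOutbound()) != null) {
            System.out.print(encoded.toString(CharsetUtil.US_ASCII));
            encoded.release();
        }
    }
}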