[ci skip] Cleanup
@@ -1,30 +0,0 @@
-package io.akarin.api.internal.utils;
-
-import java.util.concurrent.atomic.AtomicBoolean;
-
-public class ReentrantSpinningDebugLock extends ReentrantSpinningLock {
-    private final AtomicBoolean singleLock = new AtomicBoolean(false);
-    private long heldThreadId = 0;
-    private int reentrantLocks = 0;
-    public StackTraceElement[] tStackTraceElements;
-
-    public void lock() {
-        long currentThreadId = Thread.currentThread().getId();
-        if (heldThreadId == currentThreadId) {
-            reentrantLocks++;
-        } else {
-            while (!singleLock.compareAndSet(false, true)) ; // In case acquire one lock concurrently
-            heldThreadId = currentThreadId;
-        }
-        tStackTraceElements = Thread.currentThread().getStackTrace();
-    }
-
-    public void unlock() {
-        if (reentrantLocks == 0) {
-            heldThreadId = 0;
-            singleLock.set(false);
-        } else {
-            --reentrantLocks;
-        }
-    }
-}
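The file removed above is a CAS-based reentrant spin lock that also records the most recent acquirer's stack trace; heldThreadId and reentrantLocks are plain (non-volatile) fields, so the debug variant leans on the AtomicBoolean for any cross-thread visibility. A minimal usage sketch, illustrative only and not part of this commit:

import io.akarin.api.internal.utils.ReentrantSpinningDebugLock;

// Hypothetical caller; class and field names here are illustrative.
public class DebugLockExample {
    private final ReentrantSpinningDebugLock lock = new ReentrantSpinningDebugLock();
    private int sharedCounter;

    public void tick() {
        lock.lock();            // spins on the AtomicBoolean CAS until acquired
        try {
            sharedCounter++;    // state guarded by the lock
        } finally {
            lock.unlock();      // clears heldThreadId and releases singleLock
        }
    }

    public void dumpLastAcquirer() {
        // The debug variant keeps the most recent acquirer's stack trace, which
        // can be printed when a thread appears to be stuck on the lock.
        if (lock.tStackTraceElements == null) return;
        for (StackTraceElement frame : lock.tStackTraceElements) {
            System.err.println("  at " + frame);
        }
    }
}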
@@ -0,0 +1,7 @@
+package io.akarin.api.internal.utils.thread;
+
+import java.util.concurrent.ExecutionException;
+
+public class OpenExecutionException extends ExecutionException {
+    private static final long serialVersionUID = 7830266012832686185L;
+}
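java.util.concurrent.ExecutionException only exposes a protected no-argument constructor, so the new subclass above makes an ExecutionException directly instantiable when there is no cause to wrap; the SuspendableExecutionException removed later in this commit served the same purpose. A small illustrative sketch, not part of the commit:

import java.util.concurrent.ExecutionException;

import io.akarin.api.internal.utils.thread.OpenExecutionException;

public class OpenExecutionExceptionExample {
    // ExecutionException's own no-arg constructor is protected, so code that has
    // no Throwable cause to wrap can throw the public subclass instead.
    static void failWithoutCause() throws ExecutionException {
        throw new OpenExecutionException();
    }

    public static void main(String[] args) {
        try {
            failWithoutCause();
        } catch (ExecutionException expected) {
            System.out.println("caught: " + expected);
        }
    }
}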
@@ -1,318 +0,0 @@
-/*
- * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- */
-
-/*
- *
- *
- *
- *
- *
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package io.akarin.api.internal.utils.thread;
-import java.util.*;
-import java.util.concurrent.AbstractExecutorService;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.RunnableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-/**
- * Provides default implementations of {@link ExecutorService}
- * execution methods. This class implements the {@code submit},
- * {@code invokeAny} and {@code invokeAll} methods using a
- * {@link RunnableFuture} returned by {@code newTaskFor}, which defaults
- * to the {@link FutureTask} class provided in this package. For example,
- * the implementation of {@code submit(Runnable)} creates an
- * associated {@code RunnableFuture} that is executed and
- * returned. Subclasses may override the {@code newTaskFor} methods
- * to return {@code RunnableFuture} implementations other than
- * {@code FutureTask}.
- *
- * <p><b>Extension example</b>. Here is a sketch of a class
- * that customizes {@link ThreadPoolExecutor} to use
- * a {@code CustomTask} class instead of the default {@code FutureTask}:
- * <pre> {@code
- * public class CustomThreadPoolExecutor extends ThreadPoolExecutor {
- *
- *   static class CustomTask<V> implements RunnableFuture<V> {...}
- *
- *   protected <V> RunnableFuture<V> newTaskFor(Callable<V> c) {
- *       return new CustomTask<V>(c);
- *   }
- *   protected <V> RunnableFuture<V> newTaskFor(Runnable r, V v) {
- *       return new CustomTask<V>(r, v);
- *   }
- *   // ... add constructors, etc.
- * }}</pre>
- *
- * @since 1.5
- * @author Doug Lea
- */
-public abstract class SuspendableAbstractExecutorService extends AbstractExecutorService implements ExecutorService {
-
-    /**
-     * Returns a {@code RunnableFuture} for the given runnable and default
-     * value.
-     *
-     * @param runnable the runnable task being wrapped
-     * @param value the default value for the returned future
-     * @param <T> the type of the given value
-     * @return a {@code RunnableFuture} which, when run, will run the
-     * underlying runnable and which, as a {@code Future}, will yield
-     * the given value as its result and provide for cancellation of
-     * the underlying task
-     * @since 1.6
-     */
-    protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
-        return new FutureTask<T>(runnable, value);
-    }
-
-    /**
-     * Returns a {@code RunnableFuture} for the given callable task.
-     *
-     * @param callable the callable task being wrapped
-     * @param <T> the type of the callable's result
-     * @return a {@code RunnableFuture} which, when run, will call the
-     * underlying callable and which, as a {@code Future}, will yield
-     * the callable's result as its result and provide for
-     * cancellation of the underlying task
-     * @since 1.6
-     */
-    protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
-        return new FutureTask<T>(callable);
-    }
-
-    /**
-     * @throws RejectedExecutionException {@inheritDoc}
-     * @throws NullPointerException {@inheritDoc}
-     */
-    public Future<?> submit(Runnable task) {
-        if (task == null) throw new NullPointerException();
-        RunnableFuture<Void> ftask = newTaskFor(task, null);
-        execute(ftask);
-        return ftask;
-    }
-
-    /**
-     * @throws RejectedExecutionException {@inheritDoc}
-     * @throws NullPointerException {@inheritDoc}
-     */
-    public <T> Future<T> submit(Runnable task, T result) {
-        if (task == null) throw new NullPointerException();
-        RunnableFuture<T> ftask = newTaskFor(task, result);
-        execute(ftask);
-        return ftask;
-    }
-
-    /**
-     * @throws RejectedExecutionException {@inheritDoc}
-     * @throws NullPointerException {@inheritDoc}
-     */
-    public <T> Future<T> submit(Callable<T> task) {
-        if (task == null) throw new NullPointerException();
-        RunnableFuture<T> ftask = newTaskFor(task);
-        execute(ftask);
-        return ftask;
-    }
-
-    /**
-     * the main mechanics of invokeAny.
-     */
-    private <T> T doInvokeAny(Collection<? extends Callable<T>> tasks,
-                              boolean timed, long nanos)
-        throws InterruptedException, ExecutionException, TimeoutException {
-        if (tasks == null)
-            throw new NullPointerException();
-        int ntasks = tasks.size();
-        if (ntasks == 0)
-            throw new IllegalArgumentException();
-        ArrayList<Future<T>> futures = new ArrayList<Future<T>>(ntasks);
-        ExecutorCompletionService<T> ecs =
-            new ExecutorCompletionService<T>(this);
-
-        // For efficiency, especially in executors with limited
-        // parallelism, check to see if previously submitted tasks are
-        // done before submitting more of them. This interleaving
-        // plus the exception mechanics account for messiness of main
-        // loop.
-
-        try {
-            // Record exceptions so that if we fail to obtain any
-            // result, we can throw the last exception we got.
-            ExecutionException ee = null;
-            final long deadline = timed ? System.nanoTime() + nanos : 0L;
-            Iterator<? extends Callable<T>> it = tasks.iterator();
-
-            // Start one task for sure; the rest incrementally
-            futures.add(ecs.submit(it.next()));
-            --ntasks;
-            int active = 1;
-
-            for (;;) {
-                Future<T> f = ecs.poll();
-                if (f == null) {
-                    if (ntasks > 0) {
-                        --ntasks;
-                        futures.add(ecs.submit(it.next()));
-                        ++active;
-                    }
-                    else if (active == 0)
-                        break;
-                    else if (timed) {
-                        f = ecs.poll(nanos, TimeUnit.NANOSECONDS);
-                        if (f == null)
-                            throw new TimeoutException();
-                        nanos = deadline - System.nanoTime();
-                    }
-                    else
-                        f = ecs.take();
-                }
-                if (f != null) {
-                    --active;
-                    try {
-                        return f.get();
-                    } catch (ExecutionException eex) {
-                        ee = eex;
-                    } catch (RuntimeException rex) {
-                        ee = new ExecutionException(rex);
-                    }
-                }
-            }
-
-            if (ee == null)
-                ee = new SuspendableExecutionException();
-            throw ee;
-
-        } finally {
-            for (int i = 0, size = futures.size(); i < size; i++)
-                futures.get(i).cancel(true);
-        }
-    }
-
-    public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
-        throws InterruptedException, ExecutionException {
-        try {
-            return doInvokeAny(tasks, false, 0);
-        } catch (TimeoutException cannotHappen) {
-            assert false;
-            return null;
-        }
-    }
-
-    public <T> T invokeAny(Collection<? extends Callable<T>> tasks,
-                           long timeout, TimeUnit unit)
-        throws InterruptedException, ExecutionException, TimeoutException {
-        return doInvokeAny(tasks, true, unit.toNanos(timeout));
-    }
-
-    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
-        throws InterruptedException {
-        if (tasks == null)
-            throw new NullPointerException();
-        ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
-        boolean done = false;
-        try {
-            for (Callable<T> t : tasks) {
-                RunnableFuture<T> f = newTaskFor(t);
-                futures.add(f);
-                execute(f);
-            }
-            for (int i = 0, size = futures.size(); i < size; i++) {
-                Future<T> f = futures.get(i);
-                if (!f.isDone()) {
-                    try {
-                        f.get();
-                    } catch (CancellationException ignore) {
-                    } catch (ExecutionException ignore) {
-                    }
-                }
-            }
-            done = true;
-            return futures;
-        } finally {
-            if (!done)
-                for (int i = 0, size = futures.size(); i < size; i++)
-                    futures.get(i).cancel(true);
-        }
-    }
-
-    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks,
-                                         long timeout, TimeUnit unit)
-        throws InterruptedException {
-        if (tasks == null)
-            throw new NullPointerException();
-        long nanos = unit.toNanos(timeout);
-        ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size());
-        boolean done = false;
-        try {
-            for (Callable<T> t : tasks)
-                futures.add(newTaskFor(t));
-
-            final long deadline = System.nanoTime() + nanos;
-            final int size = futures.size();
-
-            // Interleave time checks and calls to execute in case
-            // executor doesn't have any/much parallelism.
-            for (int i = 0; i < size; i++) {
-                execute((Runnable)futures.get(i));
-                nanos = deadline - System.nanoTime();
-                if (nanos <= 0L)
-                    return futures;
-            }
-
-            for (int i = 0; i < size; i++) {
-                Future<T> f = futures.get(i);
-                if (!f.isDone()) {
-                    if (nanos <= 0L)
-                        return futures;
-                    try {
-                        f.get(nanos, TimeUnit.NANOSECONDS);
-                    } catch (CancellationException ignore) {
-                    } catch (ExecutionException ignore) {
-                    } catch (TimeoutException toe) {
-                        return futures;
-                    }
-                    nanos = deadline - System.nanoTime();
-                }
-            }
-            done = true;
-            return futures;
-        } finally {
-            if (!done)
-                for (int i = 0, size = futures.size(); i < size; i++)
-                    futures.get(i).cancel(true);
-        }
-    }
-
-}
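The file removed above mirrors java.util.concurrent.AbstractExecutorService, differing only in that doInvokeAny throws a SuspendableExecutionException when no task yields a result. Since the JDK class already provides the same submit/invokeAny/invokeAll behaviour, a minimal invokeAny sketch against any ExecutorService (illustrative only, not part of the commit):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// invokeAny returns the first successfully completed result and cancels the rest.
public class InvokeAnyExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        List<Callable<String>> mirrors = Arrays.asList(
            () -> { Thread.sleep(50); return "slow mirror"; },
            () -> "fast mirror"
        );
        try {
            System.out.println(pool.invokeAny(mirrors)); // likely prints "fast mirror"
        } finally {
            pool.shutdown();
        }
    }
}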
@@ -1,51 +0,0 @@
-/*
- * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- *
- */
-
-/*
- *
- *
- *
- *
- *
- * Written by Doug Lea with assistance from members of JCP JSR-166
- * Expert Group and released to the public domain, as explained at
- * http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package io.akarin.api.internal.utils.thread;
-
-import java.util.concurrent.ExecutionException;
-
-/**
- * Exception thrown when attempting to retrieve the result of a task
- * that aborted by throwing an exception. This exception can be
- * inspected using the {@link #getCause()} method.
- *
- * @see Future
- * @since 1.5
- * @author Doug Lea
- */
-public class SuspendableExecutionException extends ExecutionException {
-    private static final long serialVersionUID = 7830266012832686185L;
-}
@@ -38,93 +38,22 @@ package io.akarin.api.internal.utils.thread;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
-import java.util.concurrent.Executor;
 import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RunnableFuture;
 import java.util.concurrent.TimeUnit;
 
-/**
- * A {@link CompletionService} that uses a supplied {@link Executor}
- * to execute tasks. This class arranges that submitted tasks are,
- * upon completion, placed on a queue accessible using {@code take}.
- * The class is lightweight enough to be suitable for transient use
- * when processing groups of tasks.
- *
- * <p>
- *
- * <b>Usage Examples.</b>
- *
- * Suppose you have a set of solvers for a certain problem, each
- * returning a value of some type {@code Result}, and would like to
- * run them concurrently, processing the results of each of them that
- * return a non-null value, in some method {@code use(Result r)}. You
- * could write this as:
- *
- * <pre> {@code
- * void solve(Executor e,
- *            Collection<Callable<Result>> solvers)
- *     throws InterruptedException, ExecutionException {
- *     CompletionService<Result> ecs
- *         = new ExecutorCompletionService<Result>(e);
- *     for (Callable<Result> s : solvers)
- *         ecs.submit(s);
- *     int n = solvers.size();
- *     for (int i = 0; i < n; ++i) {
- *         Result r = ecs.take().get();
- *         if (r != null)
- *             use(r);
- *     }
- * }}</pre>
- *
- * Suppose instead that you would like to use the first non-null result
- * of the set of tasks, ignoring any that encounter exceptions,
- * and cancelling all other tasks when the first one is ready:
- *
- * <pre> {@code
- * void solve(Executor e,
- *            Collection<Callable<Result>> solvers)
- *     throws InterruptedException {
- *     CompletionService<Result> ecs
- *         = new ExecutorCompletionService<Result>(e);
- *     int n = solvers.size();
- *     List<Future<Result>> futures
- *         = new ArrayList<Future<Result>>(n);
- *     Result result = null;
- *     try {
- *         for (Callable<Result> s : solvers)
- *             futures.add(ecs.submit(s));
- *         for (int i = 0; i < n; ++i) {
- *             try {
- *                 Result r = ecs.take().get();
- *                 if (r != null) {
- *                     result = r;
- *                     break;
- *                 }
- *             } catch (ExecutionException ignore) {}
- *         }
- *     }
- *     finally {
- *         for (Future<Result> f : futures)
- *             f.cancel(true);
- *     }
- *
- *     if (result != null)
- *         use(result);
- * }}</pre>
- */
 public class SuspendableExecutorCompletionService<V> implements CompletionService<V> {
-    private final Executor executor;
-    private final SuspendableAbstractExecutorService aes;
+    private final SuspendableThreadPoolExecutor executor;
     private final BlockingQueue<Future<V>> completionQueue;
 
-    public static <V> void suspend(SuspendableExecutorCompletionService<V> service) {
-        SuspendableThreadPoolExecutor.suspend((SuspendableThreadPoolExecutor) service.executor);
+    public void suspend() {
+        executor.suspend();
     }
 
-    public static <V> void resume(SuspendableExecutorCompletionService<V> service) {
-        SuspendableThreadPoolExecutor.resume((SuspendableThreadPoolExecutor) service.executor);
+    public void resume() {
+        executor.resume();
     }
 
     /**
@@ -140,17 +69,11 @@ public class SuspendableExecutorCompletionService<V> implements CompletionServic
     }
 
     private RunnableFuture<V> newTaskFor(Callable<V> task) {
-        if (aes == null)
-            return new FutureTask<V>(task);
-        else
-            return aes.newTaskFor(task);
+        return new FutureTask<V>(task);
     }
 
     private RunnableFuture<V> newTaskFor(Runnable task, V result) {
-        if (aes == null)
-            return new FutureTask<V>(task, result);
-        else
-            return aes.newTaskFor(task, result);
+        return new FutureTask<V>(task, result);
     }
 
     /**
@@ -161,12 +84,10 @@ public class SuspendableExecutorCompletionService<V> implements CompletionServic
      * @param executor the executor to use
      * @throws NullPointerException if executor is {@code null}
      */
-    public SuspendableExecutorCompletionService(Executor executor) {
+    public SuspendableExecutorCompletionService(SuspendableThreadPoolExecutor executor) {
         if (executor == null)
             throw new NullPointerException();
         this.executor = executor;
-        this.aes = (executor instanceof SuspendableAbstractExecutorService) ?
-            (SuspendableAbstractExecutorService) executor : null;
         this.completionQueue = new LinkedBlockingQueue<Future<V>>();
     }
 
@@ -183,13 +104,11 @@ public class SuspendableExecutorCompletionService<V> implements CompletionServic
      * them not to be retrievable.
      * @throws NullPointerException if executor or completionQueue are {@code null}
      */
-    public SuspendableExecutorCompletionService(Executor executor,
+    public SuspendableExecutorCompletionService(SuspendableThreadPoolExecutor executor,
                                                 BlockingQueue<Future<V>> completionQueue) {
         if (executor == null || completionQueue == null)
             throw new NullPointerException();
         this.executor = executor;
-        this.aes = (executor instanceof SuspendableAbstractExecutorService) ?
-            (SuspendableAbstractExecutorService) executor : null;
         this.completionQueue = completionQueue;
     }
 
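After this change the completion service wraps a SuspendableThreadPoolExecutor directly and exposes instance suspend()/resume() methods instead of static helpers that cast an Executor field. A hedged usage sketch, illustrative only and not part of the commit (the pool is taken as a parameter because SuspendableThreadPoolExecutor's constructors are not shown in this diff):

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import io.akarin.api.internal.utils.thread.SuspendableExecutorCompletionService;
import io.akarin.api.internal.utils.thread.SuspendableThreadPoolExecutor;

public class StageExample {
    public static void drain(SuspendableThreadPoolExecutor pool, int taskCount)
            throws InterruptedException, ExecutionException {
        SuspendableExecutorCompletionService<Integer> stage =
            new SuspendableExecutorCompletionService<Integer>(pool);
        for (int i = 0; i < taskCount; i++) {
            final int id = i;
            stage.submit(() -> id * id);         // completed futures land on the internal queue
        }
        for (int i = 0; i < taskCount; i++) {
            Future<Integer> done = stage.take(); // blocks until some task finishes
            System.out.println("result: " + done.get());
        }
        stage.suspend();                         // new instance method: parks the pool's workers
        stage.resume();                          // ...and wakes them again
    }
}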
@@ -34,6 +34,7 @@
 */
 
 package io.akarin.api.internal.utils.thread;
 
 import java.util.concurrent.locks.AbstractQueuedSynchronizer;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -48,282 +49,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.*;
 
-/**
- * An {@link ExecutorService} that executes each submitted task using
- * one of possibly several pooled threads, normally configured
- * using {@link Executors} factory methods.
- *
- * <p>Thread pools address two different problems: they usually
- * provide improved performance when executing large numbers of
- * asynchronous tasks, due to reduced per-task invocation overhead,
- * and they provide a means of bounding and managing the resources,
- * including threads, consumed when executing a collection of tasks.
- * Each {@code ThreadPoolExecutor} also maintains some basic
- * statistics, such as the number of completed tasks.
- *
- * <p>To be useful across a wide range of contexts, this class
- * provides many adjustable parameters and extensibility
- * hooks. However, programmers are urged to use the more convenient
- * {@link Executors} factory methods {@link
- * Executors#newCachedThreadPool} (unbounded thread pool, with
- * automatic thread reclamation), {@link Executors#newFixedThreadPool}
- * (fixed size thread pool) and {@link
- * Executors#newSingleThreadExecutor} (single background thread), that
- * preconfigure settings for the most common usage
- * scenarios. Otherwise, use the following guide when manually
- * configuring and tuning this class:
- *
- * <dl>
- *
- * <dt>Core and maximum pool sizes</dt>
- *
- * <dd>A {@code ThreadPoolExecutor} will automatically adjust the
- * pool size (see {@link #getPoolSize})
- * according to the bounds set by
- * corePoolSize (see {@link #getCorePoolSize}) and
- * maximumPoolSize (see {@link #getMaximumPoolSize}).
- *
- * When a new task is submitted in method {@link #execute(Runnable)},
- * and fewer than corePoolSize threads are running, a new thread is
- * created to handle the request, even if other worker threads are
- * idle. If there are more than corePoolSize but less than
- * maximumPoolSize threads running, a new thread will be created only
- * if the queue is full. By setting corePoolSize and maximumPoolSize
- * the same, you create a fixed-size thread pool. By setting
- * maximumPoolSize to an essentially unbounded value such as {@code
- * Integer.MAX_VALUE}, you allow the pool to accommodate an arbitrary
- * number of concurrent tasks. Most typically, core and maximum pool
- * sizes are set only upon construction, but they may also be changed
- * dynamically using {@link #setCorePoolSize} and {@link
- * #setMaximumPoolSize}. </dd>
- *
- * <dt>On-demand construction</dt>
- *
- * <dd>By default, even core threads are initially created and
- * started only when new tasks arrive, but this can be overridden
- * dynamically using method {@link #prestartCoreThread} or {@link
- * #prestartAllCoreThreads}. You probably want to prestart threads if
- * you construct the pool with a non-empty queue. </dd>
- *
- * <dt>Creating new threads</dt>
- *
- * <dd>New threads are created using a {@link ThreadFactory}. If not
- * otherwise specified, a {@link Executors#defaultThreadFactory} is
- * used, that creates threads to all be in the same {@link
- * ThreadGroup} and with the same {@code NORM_PRIORITY} priority and
- * non-daemon status. By supplying a different ThreadFactory, you can
- * alter the thread's name, thread group, priority, daemon status,
- * etc. If a {@code ThreadFactory} fails to create a thread when asked
- * by returning null from {@code newThread}, the executor will
- * continue, but might not be able to execute any tasks. Threads
- * should possess the "modifyThread" {@code RuntimePermission}. If
- * worker threads or other threads using the pool do not possess this
- * permission, service may be degraded: configuration changes may not
- * take effect in a timely manner, and a shutdown pool may remain in a
- * state in which termination is possible but not completed.</dd>
- *
- * <dt>Keep-alive times</dt>
- *
- * <dd>If the pool currently has more than corePoolSize threads,
- * excess threads will be terminated if they have been idle for more
- * than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}).
- * This provides a means of reducing resource consumption when the
- * pool is not being actively used. If the pool becomes more active
- * later, new threads will be constructed. This parameter can also be
- * changed dynamically using method {@link #setKeepAliveTime(long,
- * TimeUnit)}. Using a value of {@code Long.MAX_VALUE} {@link
- * TimeUnit#NANOSECONDS} effectively disables idle threads from ever
- * terminating prior to shut down. By default, the keep-alive policy
- * applies only when there are more than corePoolSize threads. But
- * method {@link #allowCoreThreadTimeOut(boolean)} can be used to
- * apply this time-out policy to core threads as well, so long as the
- * keepAliveTime value is non-zero. </dd>
- *
- * <dt>Queuing</dt>
- *
- * <dd>Any {@link BlockingQueue} may be used to transfer and hold
- * submitted tasks. The use of this queue interacts with pool sizing:
- *
- * <ul>
- *
- * <li> If fewer than corePoolSize threads are running, the Executor
- * always prefers adding a new thread
- * rather than queuing.</li>
- *
- * <li> If corePoolSize or more threads are running, the Executor
- * always prefers queuing a request rather than adding a new
- * thread.</li>
- *
- * <li> If a request cannot be queued, a new thread is created unless
- * this would exceed maximumPoolSize, in which case, the task will be
- * rejected.</li>
- *
- * </ul>
- *
- * There are three general strategies for queuing:
- * <ol>
- *
- * <li> <em> Direct handoffs.</em> A good default choice for a work
- * queue is a {@link SynchronousQueue} that hands off tasks to threads
- * without otherwise holding them. Here, an attempt to queue a task
- * will fail if no threads are immediately available to run it, so a
- * new thread will be constructed. This policy avoids lockups when
- * handling sets of requests that might have internal dependencies.
- * Direct handoffs generally require unbounded maximumPoolSizes to
- * avoid rejection of new submitted tasks. This in turn admits the
- * possibility of unbounded thread growth when commands continue to
- * arrive on average faster than they can be processed. </li>
- *
- * <li><em> Unbounded queues.</em> Using an unbounded queue (for
- * example a {@link LinkedBlockingQueue} without a predefined
- * capacity) will cause new tasks to wait in the queue when all
- * corePoolSize threads are busy. Thus, no more than corePoolSize
- * threads will ever be created. (And the value of the maximumPoolSize
- * therefore doesn't have any effect.) This may be appropriate when
- * each task is completely independent of others, so tasks cannot
- * affect each others execution; for example, in a web page server.
- * While this style of queuing can be useful in smoothing out
- * transient bursts of requests, it admits the possibility of
- * unbounded work queue growth when commands continue to arrive on
- * average faster than they can be processed. </li>
- *
- * <li><em>Bounded queues.</em> A bounded queue (for example, an
- * {@link ArrayBlockingQueue}) helps prevent resource exhaustion when
- * used with finite maximumPoolSizes, but can be more difficult to
- * tune and control. Queue sizes and maximum pool sizes may be traded
- * off for each other: Using large queues and small pools minimizes
- * CPU usage, OS resources, and context-switching overhead, but can
- * lead to artificially low throughput. If tasks frequently block (for
- * example if they are I/O bound), a system may be able to schedule
- * time for more threads than you otherwise allow. Use of small queues
- * generally requires larger pool sizes, which keeps CPUs busier but
- * may encounter unacceptable scheduling overhead, which also
- * decreases throughput. </li>
- *
- * </ol>
- *
- * </dd>
- *
- * <dt>Rejected tasks</dt>
- *
- * <dd>New tasks submitted in method {@link #execute(Runnable)} will be
- * <em>rejected</em> when the Executor has been shut down, and also when
- * the Executor uses finite bounds for both maximum threads and work queue
- * capacity, and is saturated. In either case, the {@code execute} method
- * invokes the {@link
- * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)}
- * method of its {@link RejectedExecutionHandler}. Four predefined handler
- * policies are provided:
- *
- * <ol>
- *
- * <li> In the default {@link SuspendableThreadPoolExecutor.AbortPolicy}, the
- * handler throws a runtime {@link RejectedExecutionException} upon
- * rejection. </li>
- *
- * <li> In {@link SuspendableThreadPoolExecutor.CallerRunsPolicy}, the thread
- * that invokes {@code execute} itself runs the task. This provides a
- * simple feedback control mechanism that will slow down the rate that
- * new tasks are submitted. </li>
- *
- * <li> In {@link SuspendableThreadPoolExecutor.DiscardPolicy}, a task that
- * cannot be executed is simply dropped. </li>
- *
- * <li>In {@link SuspendableThreadPoolExecutor.DiscardOldestPolicy}, if the
- * executor is not shut down, the task at the head of the work queue
- * is dropped, and then execution is retried (which can fail again,
- * causing this to be repeated.) </li>
- *
- * </ol>
- *
- * It is possible to define and use other kinds of {@link
- * RejectedExecutionHandler} classes. Doing so requires some care
- * especially when policies are designed to work only under particular
- * capacity or queuing policies. </dd>
- *
- * <dt>Hook methods</dt>
- *
- * <dd>This class provides {@code protected} overridable
- * {@link #beforeExecute(Thread, Runnable)} and
- * {@link #afterExecute(Runnable, Throwable)} methods that are called
- * before and after execution of each task. These can be used to
- * manipulate the execution environment; for example, reinitializing
- * ThreadLocals, gathering statistics, or adding log entries.
- * Additionally, method {@link #terminated} can be overridden to perform
- * any special processing that needs to be done once the Executor has
- * fully terminated.
- *
- * <p>If hook or callback methods throw exceptions, internal worker
- * threads may in turn fail and abruptly terminate.</dd>
- *
- * <dt>Queue maintenance</dt>
- *
- * <dd>Method {@link #getQueue()} allows access to the work queue
- * for purposes of monitoring and debugging. Use of this method for
- * any other purpose is strongly discouraged. Two supplied methods,
- * {@link #remove(Runnable)} and {@link #purge} are available to
- * assist in storage reclamation when large numbers of queued tasks
- * become cancelled.</dd>
- *
- * <dt>Finalization</dt>
- *
- * <dd>A pool that is no longer referenced in a program <em>AND</em>
- * has no remaining threads will be {@code shutdown} automatically. If
- * you would like to ensure that unreferenced pools are reclaimed even
- * if users forget to call {@link #shutdown}, then you must arrange
- * that unused threads eventually die, by setting appropriate
- * keep-alive times, using a lower bound of zero core threads and/or
- * setting {@link #allowCoreThreadTimeOut(boolean)}. </dd>
- *
- * </dl>
- *
- * <p><b>Extension example</b>. Most extensions of this class
- * override one or more of the protected hook methods. For example,
- * here is a subclass that adds a simple pause/resume feature:
- *
- * <pre> {@code
- * class PausableThreadPoolExecutor extends ThreadPoolExecutor {
- *   private boolean isPaused;
- *   private ReentrantLock pauseLock = new ReentrantLock();
- *   private Condition unpaused = pauseLock.newCondition();
- *
- *   public PausableThreadPoolExecutor(...) { super(...); }
- *
- *   protected void beforeExecute(Thread t, Runnable r) {
- *     super.beforeExecute(t, r);
- *     pauseLock.lock();
- *     try {
- *       while (isPaused) unpaused.await();
- *     } catch (InterruptedException ie) {
- *       t.interrupt();
- *     } finally {
- *       pauseLock.unlock();
- *     }
- *   }
- *
- *   public void pause() {
- *     pauseLock.lock();
- *     try {
- *       isPaused = true;
- *     } finally {
- *       pauseLock.unlock();
- *     }
- *   }
- *
- *   public void resume() {
- *     pauseLock.lock();
- *     try {
- *       isPaused = false;
- *       unpaused.signalAll();
- *     } finally {
- *       pauseLock.unlock();
- *     }
- *   }
- * }}</pre>
- *
- * @since 1.5
- * @author Doug Lea
- */
 public class SuspendableThreadPoolExecutor extends ThreadPoolExecutor {
     /**
      * The main pool control state, ctl, is an atomic integer packing
@@ -398,16 +123,14 @@ public class SuspendableThreadPoolExecutor extends ThreadPoolExecutor {
     private static int workerCountOf(int c)  { return c & CAPACITY; }
     private static int ctlOf(int rs, int wc) { return rs | wc; }
 
-    @SuppressWarnings("deprecation")
-    public static void suspend(SuspendableThreadPoolExecutor executor) {
-        long currentThread = Thread.currentThread().getId();
-        for (Worker worker : executor.workers) if (worker.thread.getId() != currentThread) worker.thread.suspend();
+    public void suspend() {
+        Thread curThread = Thread.currentThread();
+        for (Worker worker : workers) if (worker.thread != curThread) worker.thread.suspend();
     }
 
-    @SuppressWarnings("deprecation")
-    public static void resume(SuspendableThreadPoolExecutor executor) {
-        long currentThread = Thread.currentThread().getId();
-        for (Worker worker : executor.workers) if (worker.thread.getId() != currentThread) worker.thread.resume();
+    public void resume() {
+        Thread curThread = Thread.currentThread();
+        for (Worker worker : workers) if (worker.thread != curThread) worker.thread.resume();
     }
 
     /*
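The suspend()/resume() methods above park and wake every worker thread except the caller via the deprecated Thread.suspend()/Thread.resume(). The Javadoc deleted in this commit sketches a cooperative alternative (Doug Lea's PausableThreadPoolExecutor example); a compilable version of that sketch is shown below. Unlike Thread.suspend(), it only pauses workers between tasks rather than stopping a task mid-flight, and it is offered purely for comparison, not as part of the commit:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class PausableThreadPoolExecutor extends ThreadPoolExecutor {
    private boolean isPaused;
    private final ReentrantLock pauseLock = new ReentrantLock();
    private final Condition unpaused = pauseLock.newCondition();

    public PausableThreadPoolExecutor(int poolSize) {
        super(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>());
    }

    @Override
    protected void beforeExecute(Thread t, Runnable r) {
        super.beforeExecute(t, r);
        pauseLock.lock();
        try {
            while (isPaused) unpaused.await();   // park the worker until resumed
        } catch (InterruptedException ie) {
            t.interrupt();
        } finally {
            pauseLock.unlock();
        }
    }

    public void pause() {
        pauseLock.lock();
        try {
            isPaused = true;
        } finally {
            pauseLock.unlock();
        }
    }

    public void resume() {
        pauseLock.lock();
        try {
            isPaused = false;
            unpaused.signalAll();
        } finally {
            pauseLock.unlock();
        }
    }
}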
@@ -18,8 +18,8 @@ import net.minecraft.server.MinecraftServer;
 public abstract class MixinTimingHandler implements IMixinTimingHandler {
     @Shadow @Final String name;
     @Shadow private boolean enabled;
-    @Shadow private volatile long start;
-    @Shadow private volatile int timingDepth;
+    @Shadow private long start;
+    @Shadow private int timingDepth;
 
     @Shadow abstract void addDiff(long diff);
     @Shadow public abstract Timing startTiming();
@@ -47,7 +47,7 @@ public abstract class MixinTimingHandler implements IMixinTimingHandler {
         if (!alreadySync) {
             Thread curThread = Thread.currentThread();
             if (curThread != MinecraftServer.getServer().primaryThread) {
-                if (false && !AkarinGlobalConfig.silentAsyncTimings) {
+                if (!AkarinGlobalConfig.silentAsyncTimings) {
                     Bukkit.getLogger().log(Level.SEVERE, "stopTiming called async for " + name);
                     Thread.dumpStack();
                 }
@@ -3,8 +3,6 @@ package org.bukkit.plugin;
 import org.bukkit.event.Event;
 import org.bukkit.event.EventException;
 import org.bukkit.event.Listener;
-import org.bukkit.event.block.BlockPhysicsEvent;
-
 // Paper start
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
@@ -22,6 +20,10 @@ import io.akarin.api.internal.Akari;
 import io.akarin.api.internal.utils.thread.SuspendableExecutorCompletionService;
 // Paper end
 
+/**
+ * Akarin Changes Note
+ * 1) Suspend for event (safety issue)
+ */
 /**
  * Interface which defines the class for event call backs to plugins
  */
@@ -71,11 +73,11 @@ public interface EventExecutor {
             if (!eventClass.isInstance(event)) return;
             try {
                 Akari.eventSuspendTiming.startTimingIfSync(); // Akarin
-                SuspendableExecutorCompletionService.suspend(Akari.STAGE_TICK); // Akarin
+                Akari.STAGE_TICK.suspend(); // Akarin
                 Akari.eventSuspendTiming.stopTimingIfSync(); // Akarin
                 asmExecutor.execute(listener, event);
                 Akari.eventResumeTiming.startTimingIfSync(); // Akarin
-                SuspendableExecutorCompletionService.resume(Akari.STAGE_TICK); // Akarin
+                Akari.STAGE_TICK.resume(); // Akarin
                 Akari.eventResumeTiming.stopTimingIfSync(); // Akarin
             } catch (Exception e) {
                 throw new EventException(e);
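In the call site above, resume() runs only after the listener returns; in the lines shown, an exception thrown by asmExecutor.execute would propagate before the stage-tick pool is resumed. A defensive wrapper that always resumes, offered as an illustrative sketch only and not part of the commit:

import io.akarin.api.internal.Akari;

public final class SuspendedEventCall {
    private SuspendedEventCall() {}

    public static void run(Runnable listenerCall) {
        Akari.STAGE_TICK.suspend();      // park the stage-tick workers
        try {
            listenerCall.run();          // dispatch the event on the calling thread
        } finally {
            Akari.STAGE_TICK.resume();   // always wake the workers, even on failure
        }
    }
}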