9
0
mirror of https://github.com/BX-Team/DivineMC.git synced 2025-12-22 08:19:19 +00:00

move patches to work

This commit is contained in:
NONPLAYT
2025-07-06 01:21:32 +03:00
parent c15046f9ae
commit dd0274ddc1
86 changed files with 17 additions and 2172 deletions

View File

@@ -175,10 +175,10 @@ index 345d4b80bd4383e0fb66d744d87bc8ef4100fd32..68a074a1eb11b158af773a2c44aa49d5
private static boolean isRightDistanceToPlayerAndSpawnPoint(ServerLevel level, ChunkAccess chunk, BlockPos.MutableBlockPos pos, double distance) { private static boolean isRightDistanceToPlayerAndSpawnPoint(ServerLevel level, ChunkAccess chunk, BlockPos.MutableBlockPos pos, double distance) {
diff --git a/net/minecraft/world/level/chunk/ChunkAccess.java b/net/minecraft/world/level/chunk/ChunkAccess.java diff --git a/net/minecraft/world/level/chunk/ChunkAccess.java b/net/minecraft/world/level/chunk/ChunkAccess.java
index 5e9f3856c384dbb2bd462121b903cd2b326e4376..19f74518923783d8d5560b526a1f267dabd23156 100644 index 182c14b660f8860bed627eed4e01fd4002153e9a..81511de113c292549fe5fe720a15bf3e0497ca84 100644
--- a/net/minecraft/world/level/chunk/ChunkAccess.java --- a/net/minecraft/world/level/chunk/ChunkAccess.java
+++ b/net/minecraft/world/level/chunk/ChunkAccess.java +++ b/net/minecraft/world/level/chunk/ChunkAccess.java
@@ -92,6 +92,7 @@ public abstract class ChunkAccess implements BiomeManager.NoiseBiomeSource, Ligh @@ -88,6 +88,7 @@ public abstract class ChunkAccess implements BiomeManager.NoiseBiomeSource, Ligh
public org.bukkit.craftbukkit.persistence.DirtyCraftPersistentDataContainer persistentDataContainer = new org.bukkit.craftbukkit.persistence.DirtyCraftPersistentDataContainer(ChunkAccess.DATA_TYPE_REGISTRY); public org.bukkit.craftbukkit.persistence.DirtyCraftPersistentDataContainer persistentDataContainer = new org.bukkit.craftbukkit.persistence.DirtyCraftPersistentDataContainer(ChunkAccess.DATA_TYPE_REGISTRY);
// CraftBukkit end // CraftBukkit end
public final Registry<Biome> biomeRegistry; // CraftBukkit public final Registry<Biome> biomeRegistry; // CraftBukkit

View File

@@ -25,7 +25,7 @@ index c4a4f08272b34f72dea4feaaeb66d153b2aab8c8..be5da5a81246b4f4abe19f7c0cf68990
} }
diff --git a/net/minecraft/world/level/Level.java b/net/minecraft/world/level/Level.java diff --git a/net/minecraft/world/level/Level.java b/net/minecraft/world/level/Level.java
index 6e8075618baf98fcc396f0b5e241a806805b3d94..37f5bd2a63e2ec074fbc55d366e0d128f1918089 100644 index 4c1ce7e85f9c3315635472047ffaf15a711aeffd..9625213b7c1295b813071dbedea5366510c7072f 100644
--- a/net/minecraft/world/level/Level.java --- a/net/minecraft/world/level/Level.java
+++ b/net/minecraft/world/level/Level.java +++ b/net/minecraft/world/level/Level.java
@@ -1171,6 +1171,12 @@ public abstract class Level implements LevelAccessor, UUIDLookup<Entity>, AutoCl @@ -1171,6 +1171,12 @@ public abstract class Level implements LevelAccessor, UUIDLookup<Entity>, AutoCl

View File

@@ -15,7 +15,7 @@ The delay is currently set to 2 seconds, however, we may want to adjust this bef
This patch fixes PaperMC/Paper#9581 This patch fixes PaperMC/Paper#9581
diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java b/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java b/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java
index e93a006cde4dd85a9976e0d6a64643755ba99fb7..62e89385fcdc3fa0202863f3199c98a2df4be2a6 100644 index bdc1200ef5317fdaf58973bf580b0a672aee800f..1ed2ae41e47b2446bf1835efc8bad369408d52da 100644
--- a/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java --- a/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java
+++ b/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java +++ b/ca/spottedleaf/moonrise/patches/chunk_system/player/RegionizedPlayerChunkLoader.java
@@ -48,6 +48,7 @@ public final class RegionizedPlayerChunkLoader { @@ -48,6 +48,7 @@ public final class RegionizedPlayerChunkLoader {
@@ -27,10 +27,10 @@ index e93a006cde4dd85a9976e0d6a64643755ba99fb7..62e89385fcdc3fa0202863f3199c98a2
public static final int GENERATED_TICKET_LEVEL = ChunkHolderManager.FULL_LOADED_TICKET_LEVEL; public static final int GENERATED_TICKET_LEVEL = ChunkHolderManager.FULL_LOADED_TICKET_LEVEL;
public static final int LOADED_TICKET_LEVEL = ChunkTaskScheduler.getTicketLevel(ChunkStatus.EMPTY); public static final int LOADED_TICKET_LEVEL = ChunkTaskScheduler.getTicketLevel(ChunkStatus.EMPTY);
diff --git a/net/minecraft/server/players/PlayerList.java b/net/minecraft/server/players/PlayerList.java diff --git a/net/minecraft/server/players/PlayerList.java b/net/minecraft/server/players/PlayerList.java
index e1ca822d41311e3be44c52badb907619ca681cf9..bd6cff4916fdf379ee887259d18ee274ff2f8bc6 100644 index 8a67672f1175769ac213099331453fbae59442fa..c70b5ce2dc8cbcdea8715339a63e038f94849bfb 100644
--- a/net/minecraft/server/players/PlayerList.java --- a/net/minecraft/server/players/PlayerList.java
+++ b/net/minecraft/server/players/PlayerList.java +++ b/net/minecraft/server/players/PlayerList.java
@@ -316,6 +316,13 @@ public abstract class PlayerList { @@ -315,6 +315,13 @@ public abstract class PlayerList {
// this.broadcastAll(ClientboundPlayerInfoUpdatePacket.createPlayerInitializing(List.of(player))); // CraftBukkit - replaced with loop below // this.broadcastAll(ClientboundPlayerInfoUpdatePacket.createPlayerInitializing(List.of(player))); // CraftBukkit - replaced with loop below
// Paper start - Fire PlayerJoinEvent when Player is actually ready; correctly register player BEFORE PlayerJoinEvent, so the entity is valid and doesn't require tick delay hacks // Paper start - Fire PlayerJoinEvent when Player is actually ready; correctly register player BEFORE PlayerJoinEvent, so the entity is valid and doesn't require tick delay hacks
player.supressTrackerForLogin = true; player.supressTrackerForLogin = true;

View File

@@ -5,22 +5,22 @@ Subject: [PATCH] Smooth teleport API
diff --git a/net/minecraft/server/level/ServerPlayer.java b/net/minecraft/server/level/ServerPlayer.java diff --git a/net/minecraft/server/level/ServerPlayer.java b/net/minecraft/server/level/ServerPlayer.java
index cf4ab76f463836a8ed9aeedd09ae95e75b9e8dbc..f5a0c5a2f56376bf89b16a809d465bc45a80eb38 100644 index a1b4720ef128ba5cbe1466a7a584d4fe501a71f8..b75d2d2746c3e7b12f65b0bcb559cd7e0ce7ebf8 100644
--- a/net/minecraft/server/level/ServerPlayer.java --- a/net/minecraft/server/level/ServerPlayer.java
+++ b/net/minecraft/server/level/ServerPlayer.java +++ b/net/minecraft/server/level/ServerPlayer.java
@@ -431,6 +431,7 @@ public class ServerPlayer extends Player implements ca.spottedleaf.moonrise.patc @@ -430,6 +430,7 @@ public class ServerPlayer extends Player implements ca.spottedleaf.moonrise.patc
private boolean tpsBar = false; // Purpur - Implement TPSBar
private boolean compassBar = false; // Purpur - Add compass command private boolean compassBar = false; // Purpur - Add compass command
private boolean ramBar = false; // Purpur - Implement rambar commands private boolean ramBar = false; // Purpur - Implement rambar commands
public boolean hasTickedAtLeastOnceInNewWorld = false; // DivineMC - Parallel world ticking
+ public boolean smoothWorldTeleport; // DivineMC - Smooth teleport API + public boolean smoothWorldTeleport; // DivineMC - Smooth teleport API
// Paper start - rewrite chunk system // Paper start - rewrite chunk system
private ca.spottedleaf.moonrise.patches.chunk_system.player.RegionizedPlayerChunkLoader.PlayerChunkLoaderData chunkLoader; private ca.spottedleaf.moonrise.patches.chunk_system.player.RegionizedPlayerChunkLoader.PlayerChunkLoaderData chunkLoader;
diff --git a/net/minecraft/server/players/PlayerList.java b/net/minecraft/server/players/PlayerList.java diff --git a/net/minecraft/server/players/PlayerList.java b/net/minecraft/server/players/PlayerList.java
index 04f82f77e1ad2b7105cbace2a4ef99590965ae4f..147535646319018ec5dfe42d12fdb19d9e1f7543 100644 index c70b5ce2dc8cbcdea8715339a63e038f94849bfb..9362bfdf8f5495d237b1e74be4dd925db2452dc0 100644
--- a/net/minecraft/server/players/PlayerList.java --- a/net/minecraft/server/players/PlayerList.java
+++ b/net/minecraft/server/players/PlayerList.java +++ b/net/minecraft/server/players/PlayerList.java
@@ -758,11 +758,11 @@ public abstract class PlayerList { @@ -748,11 +748,11 @@ public abstract class PlayerList {
byte b = (byte)(keepInventory ? 1 : 0); byte b = (byte)(keepInventory ? 1 : 0);
ServerLevel serverLevel = serverPlayer.level(); ServerLevel serverLevel = serverPlayer.level();
LevelData levelData = serverLevel.getLevelData(); LevelData levelData = serverLevel.getLevelData();
@@ -34,7 +34,7 @@ index 04f82f77e1ad2b7105cbace2a4ef99590965ae4f..147535646319018ec5dfe42d12fdb19d
serverPlayer.connection.send(new ClientboundSetDefaultSpawnPositionPacket(level.getSharedSpawnPos(), level.getSharedSpawnAngle())); serverPlayer.connection.send(new ClientboundSetDefaultSpawnPositionPacket(level.getSharedSpawnPos(), level.getSharedSpawnAngle()));
serverPlayer.connection.send(new ClientboundChangeDifficultyPacket(levelData.getDifficulty(), levelData.isDifficultyLocked())); serverPlayer.connection.send(new ClientboundChangeDifficultyPacket(levelData.getDifficulty(), levelData.isDifficultyLocked()));
serverPlayer.connection serverPlayer.connection
@@ -849,6 +849,12 @@ public abstract class PlayerList { @@ -839,6 +839,12 @@ public abstract class PlayerList {
return serverPlayer; return serverPlayer;
} }

View File

@@ -5,10 +5,10 @@ Subject: [PATCH] Paper PR: Add FillBottleEvents for player and dispenser
diff --git a/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java b/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java diff --git a/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java b/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java
index 214bc24aa301f99c911a129676bc7d7d50df7236..83c6cf3cb062c8a6508728822e37d52a543415a3 100644 index a162440a583801671787163d998d6b9546ef7e61..d10ee84ed2f6b1c81667b968984f3ebf5c39e445 100644
--- a/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java --- a/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java
+++ b/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java +++ b/src/main/java/org/bukkit/craftbukkit/event/CraftEventFactory.java
@@ -2148,4 +2148,18 @@ public class CraftEventFactory { @@ -2128,4 +2128,18 @@ public class CraftEventFactory {
return disconnectReason; return disconnectReason;
} }

View File

@@ -5,10 +5,10 @@ Subject: [PATCH] Optimize default values for configs
diff --git a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java diff --git a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
index d13c749cfb49ce3c70b4c24ece780a4fc9482d78..a25a2bb0008901a249cf2cc320944bd69cf8ae72 100644 index c5a491acfe4b93bfa8fd21861edbaf464a178bf3..f572613f3b2aeacfbfcd0a5f96048f55ec2384b9 100644
--- a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java --- a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
+++ b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java +++ b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
@@ -347,8 +347,8 @@ public class GlobalConfiguration extends ConfigurationPart { @@ -348,8 +348,8 @@ public class GlobalConfiguration extends ConfigurationPart {
@Constraints.Min(4) @Constraints.Min(4)
public int regionFileCacheSize = 256; public int regionFileCacheSize = 256;
@Comment("See https://luckformula.emc.gs") @Comment("See https://luckformula.emc.gs")

View File

@@ -5,7 +5,7 @@ Subject: [PATCH] Smooth teleport API
diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
index f2a7c688597d9a07e7ef07476cedbd423c8b3b0d..60669bf18850811546b46d85c6650b02cda963ab 100644 index 218556fdaf4ea4993864e22530b4bad3335a535d..6a1ac6cdd0cf4f6a62216c264f6fd3cd25476254 100644
--- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
+++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
@@ -1329,6 +1329,17 @@ public class CraftPlayer extends CraftHumanEntity implements Player, PluginMessa @@ -1329,6 +1329,17 @@ public class CraftPlayer extends CraftHumanEntity implements Player, PluginMessa

View File

@@ -1,191 +0,0 @@
package com.ishland.flowsched.executor;
import com.ishland.flowsched.structs.DynamicPriorityQueue;
import com.ishland.flowsched.util.Assertions;
import it.unimi.dsi.fastutil.objects.ReferenceArrayList;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.function.Consumer;

/**
 * Priority-based task executor backed by a fixed pool of {@link WorkerThread}s.
 *
 * <p>Workers pull {@link Task}s from a shared {@link DynamicPriorityQueue}. Before a
 * task runs, every {@link LockToken} it declares via {@link Task#lockTokens()} must be
 * acquired ({@link #tryLock(Task)}). A task that loses the race parks itself on the
 * current holder's listener list and is re-enqueued when the holder calls
 * {@link #releaseLocks(Task)}.
 */
public class ExecutorManager {
    // Single work queue shared by all workers; lower priority index is dequeued first.
    public final DynamicPriorityQueue<Task> globalWorkQueue;
    // Maps a currently-held lock token to the tasks waiting for that token to free.
    protected final ConcurrentMap<LockToken, FreeableTaskList> lockListeners = new ConcurrentHashMap<>();
    protected final WorkerThread[] workerThreads;
    // Idle workers wait() on this monitor; schedule()/wakeup() notify it.
    final Object workerMonitor = new Object();
    /**
     * Creates a new executor manager.
     *
     * @param workerThreadCount the number of worker threads.
     */
    public ExecutorManager(int workerThreadCount) {
        this(workerThreadCount, thread -> { });
    }
    /**
     * Creates a new executor manager.
     *
     * @param workerThreadCount the number of worker threads.
     * @param threadInitializer the thread initializer (applied before each worker starts,
     *                          e.g. to set the thread name or daemon flag).
     */
    public ExecutorManager(int workerThreadCount, Consumer<Thread> threadInitializer) {
        globalWorkQueue = new DynamicPriorityQueue<>();
        workerThreads = new WorkerThread[workerThreadCount];
        for (int i = 0; i < workerThreadCount; i++) {
            final WorkerThread thread = new WorkerThread(this);
            threadInitializer.accept(thread);
            thread.start();
            workerThreads[i] = thread;
        }
    }
    /**
     * Attempt to lock the given tokens.
     * The caller should discard the task if this method returns false, as it reschedules the task.
     *
     * @return {@code true} if the lock is acquired, {@code false} otherwise.
     */
    boolean tryLock(Task task) {
        retry:
        while (true) {
            // Fresh listener list that will mark this task's ownership of all its tokens.
            final FreeableTaskList listenerSet = new FreeableTaskList();
            LockToken[] lockTokens = task.lockTokens();
            for (int i = 0; i < lockTokens.length; i++) {
                LockToken token = lockTokens[i];
                final FreeableTaskList present = this.lockListeners.putIfAbsent(token, listenerSet);
                if (present != null) {
                    // Token is already held: roll back the tokens we claimed so far...
                    for (int j = 0; j < i; j++) {
                        this.lockListeners.remove(lockTokens[j], listenerSet);
                    }
                    // ...and release any tasks that queued on our (now dead) listener set
                    // between our putIfAbsent and the rollback above.
                    callListeners(listenerSet);
                    synchronized (present) {
                        if (present.freed) {
                            // Holder released between putIfAbsent and here; retry from scratch.
                            continue retry;
                        } else {
                            // Park this task; releaseLocks() on the holder re-enqueues it.
                            present.add(task);
                        }
                    }
                    return false;
                }
            }
            return true;
        }
    }
    /**
     * Release the locks held by the given task.
     *
     * <p>All tokens of one task share a single listener list, so the list removed for
     * each token is asserted to be identical.
     *
     * @param task the task.
     * @throws IllegalStateException if a token of the task is not currently locked
     */
    void releaseLocks(Task task) {
        FreeableTaskList expectedListeners = null;
        for (LockToken token : task.lockTokens()) {
            final FreeableTaskList listeners = this.lockListeners.remove(token);
            if (listeners != null) {
                if (expectedListeners == null) {
                    expectedListeners = listeners;
                } else {
                    Assertions.assertTrue(expectedListeners == listeners, "Inconsistent lock listeners");
                }
            } else {
                throw new IllegalStateException("Lock token " + token + " is not locked");
            }
        }
        if (expectedListeners != null) {
            callListeners(expectedListeners); // synchronizes
        }
    }
    // Marks the listener list freed and re-enqueues every waiting task.
    // Note: wakeup() is only reached when the list was non-empty (early return above it).
    private void callListeners(FreeableTaskList listeners) {
        synchronized (listeners) {
            listeners.freed = true;
            if (listeners.isEmpty()) return;
            for (Task listener : listeners) {
                this.schedule0(listener);
            }
        }
        this.wakeup();
    }
    /**
     * Polls an executable task from the global work queue.
     *
     * <p>Tasks whose locks cannot be acquired are parked by {@link #tryLock(Task)} and
     * skipped here; they are re-enqueued when their blocking locks are released.
     *
     * @return the task, or {@code null} if no task is executable.
     */
    Task pollExecutableTask() {
        Task task;
        while ((task = this.globalWorkQueue.dequeue()) != null) {
            if (this.tryLock(task)) {
                return task;
            }
        }
        return null;
    }
    /**
     * Shuts down the executor manager.
     */
    public void shutdown() {
        for (WorkerThread workerThread : workerThreads) {
            workerThread.shutdown();
        }
    }
    /**
     * Schedules a task.
     *
     * @param task the task.
     */
    public void schedule(Task task) {
        schedule0(task);
        wakeup();
    }
    // Enqueue only — does not wake idle workers.
    private void schedule0(Task task) {
        this.globalWorkQueue.enqueue(task, task.priority());
    }
    public void wakeup() { // Canvas - private -> public
        synchronized (this.workerMonitor) {
            this.workerMonitor.notify();
        }
    }
    public boolean hasPendingTasks() {
        return this.globalWorkQueue.size() != 0;
    }
    /**
     * Schedules a runnable for execution with the given priority.
     *
     * @param runnable the runnable.
     * @param priority the priority.
     */
    public void schedule(Runnable runnable, int priority) {
        this.schedule(new SimpleTask(runnable, priority));
    }
    /**
     * Creates an executor that schedules runnables with the given priority.
     *
     * @param priority the priority.
     * @return the executor.
     */
    public Executor executor(int priority) {
        return runnable -> this.schedule(runnable, priority);
    }
    /**
     * Notifies the executor manager that the priority of the given task has changed.
     *
     * @param task the task.
     */
    public void notifyPriorityChange(Task task) {
        this.globalWorkQueue.changePriority(task, task.priority());
    }
    // Waiting-task list for one held lock set; 'freed' is flipped under the list's own
    // monitor when the holder releases, closing the race in tryLock().
    protected static class FreeableTaskList extends ReferenceArrayList<Task> { // Canvas - private -> protected
        private boolean freed = false;
    }
}

View File

@@ -1,3 +0,0 @@
package com.ishland.flowsched.executor;

/**
 * Marker for a lockable resource identity used by {@link ExecutorManager}.
 *
 * <p>Tokens are used as {@link java.util.concurrent.ConcurrentHashMap} keys
 * ({@code ExecutorManager.lockListeners}), so implementations should provide
 * stable {@code equals}/{@code hashCode} semantics for the resource they name.
 */
public interface LockToken { }

View File

@@ -1,37 +0,0 @@
package com.ishland.flowsched.executor;

import java.util.Objects;

/**
 * Adapts a plain {@link Runnable} to the {@link Task} interface with a fixed
 * scheduling priority and no lock requirements.
 */
public class SimpleTask implements Task {

    /** Shared empty token array — this task never contends for locks. */
    private static final LockToken[] NO_TOKENS = new LockToken[0];

    private final Runnable delegate;
    private final int taskPriority;

    /**
     * @param wrapped  the work to run; must not be {@code null}
     * @param priority the fixed priority reported by {@link #priority()}
     */
    public SimpleTask(Runnable wrapped, int priority) {
        this.delegate = Objects.requireNonNull(wrapped);
        this.taskPriority = priority;
    }

    /** Runs the wrapped runnable, always releasing locks afterwards — even on failure. */
    @Override
    public void run(Runnable releaseLocks) {
        try {
            this.delegate.run();
        } finally {
            releaseLocks.run();
        }
    }

    /** Default failure handling: dump the stack trace to stderr. */
    @Override
    public void propagateException(Throwable t) {
        t.printStackTrace();
    }

    @Override
    public LockToken[] lockTokens() {
        return NO_TOKENS;
    }

    @Override
    public int priority() {
        return this.taskPriority;
    }
}

View File

@@ -1,11 +0,0 @@
package com.ishland.flowsched.executor;

/**
 * A unit of work scheduled on an {@link ExecutorManager}.
 */
public interface Task {
    /**
     * Runs the task. The implementation must invoke {@code releaseLocks} when its
     * locked resources may be freed (see {@code SimpleTask}, which calls it in a
     * {@code finally} block).
     */
    void run(Runnable releaseLocks);
    /** Invoked by the worker when {@link #run(Runnable)} threw {@code t}. */
    void propagateException(Throwable t);
    /** Tokens that must all be acquired before this task may run; empty for none. */
    LockToken[] lockTokens();
    /** Scheduling priority; lower values are dequeued first (see DynamicPriorityQueue). */
    int priority();
}

View File

@@ -1,84 +0,0 @@
package com.ishland.flowsched.executor;
import ca.spottedleaf.moonrise.common.util.TickThread;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.LockSupport;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Worker loop for {@link ExecutorManager}: repeatedly polls executable tasks and
 * parks on {@code executorManager.workerMonitor} when the queue is drained.
 */
public class WorkerThread extends TickThread {
    private static final Logger LOGGER = LoggerFactory.getLogger("FlowSched Executor Worker Thread");
    private final ExecutorManager executorManager;
    private final AtomicBoolean shutdown = new AtomicBoolean(false);
    // True while polling/running tasks; false while parked waiting for work.
    public volatile boolean active = false;
    public WorkerThread(ExecutorManager executorManager) {
        super("null_worker"); // placeholder thread name; presumably renamed by the ExecutorManager thread initializer — TODO confirm
        this.executorManager = executorManager;
    }
    @Override
    public void run() {
        main_loop:
        while (true) {
            if (this.shutdown.get()) {
                return;
            }
            active = true;
            if (pollTasks()) {
                continue;
            }
            // Queue looked empty: re-check under the monitor so a schedule()+wakeup()
            // between our poll and the wait() cannot be missed.
            synchronized (this.executorManager.workerMonitor) {
                if (this.executorManager.hasPendingTasks()) continue main_loop;
                try {
                    active = false;
                    // NOTE(review): shutdown() uses LockSupport.unpark, which does not
                    // interrupt an Object.wait(); a worker idle here may not observe
                    // shutdown until the next wakeup() — verify intended.
                    this.executorManager.workerMonitor.wait();
                } catch (InterruptedException ignored) {
                }
            }
        }
    }
    /**
     * Polls and runs at most one task.
     *
     * @return {@code true} if a task was run (or a task-level failure was handled),
     *         {@code false} if no executable task was available.
     */
    private boolean pollTasks() {
        final Task task = executorManager.pollExecutableTask();
        try {
            if (task != null) {
                // Guard so locks are released exactly once, whether by the task's own
                // callback or by the catch block below.
                AtomicBoolean released = new AtomicBoolean(false);
                try {
                    task.run(() -> {
                        if (released.compareAndSet(false, true)) {
                            executorManager.releaseLocks(task);
                        }
                    });
                } catch (Throwable t) {
                    try {
                        if (released.compareAndSet(false, true)) {
                            executorManager.releaseLocks(task);
                        }
                    } catch (Throwable t1) {
                        t.addSuppressed(t1);
                        LOGGER.error("Exception thrown while releasing locks", t);
                    }
                    try {
                        task.propagateException(t);
                    } catch (Throwable t1) {
                        t.addSuppressed(t1);
                        LOGGER.error("Exception thrown while propagating exception", t);
                    }
                }
                return true;
            }
            return false;
        } catch (Throwable t) {
            LOGGER.error("Exception thrown while executing task", t);
            return true;
        }
    }
    /** Requests loop exit; the flag is checked at the top of {@link #run()}. */
    public void shutdown() {
        shutdown.set(true);
        LockSupport.unpark(this);
    }
}

View File

@@ -1,86 +0,0 @@
package com.ishland.flowsched.structs;
import ca.spottedleaf.moonrise.common.util.MoonriseConstants;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicIntegerArray;

/**
 * Concurrent multi-level queue: one {@link ConcurrentLinkedQueue} per priority level,
 * scanned lowest-index-first by {@link #dequeue()}. {@code priorityMap} tracks each
 * element's current level; {@code taskCount} lets dequeue skip empty levels cheaply.
 *
 * <p>Operations are lock-free check-then-act sequences; counts may be transiently
 * inconsistent with queue contents under contention.
 */
@SuppressWarnings("unchecked")
public class DynamicPriorityQueue<E> {
    // Level count is derived from the maximum view distance plus headroom.
    public static final int MAX_PRIORITY = MoonriseConstants.MAX_VIEW_DISTANCE + 3;
    private final AtomicIntegerArray taskCount;
    public final ConcurrentLinkedQueue<E>[] priorities;
    private final ConcurrentHashMap<E, Integer> priorityMap = new ConcurrentHashMap<>();
    public DynamicPriorityQueue() {
        this.taskCount = new AtomicIntegerArray(MAX_PRIORITY);
        this.priorities = new ConcurrentLinkedQueue[MAX_PRIORITY];
        for (int i = 0; i < (MAX_PRIORITY); i++) {
            this.priorities[i] = new ConcurrentLinkedQueue<>();
        }
    }
    /**
     * Adds {@code element} at {@code priority}.
     *
     * @throws IllegalArgumentException if the element is already queued
     */
    public void enqueue(E element, int priority) {
        if (this.priorityMap.putIfAbsent(element, priority) != null)
            throw new IllegalArgumentException("Element already in queue");
        this.priorities[priority].add(element);
        this.taskCount.incrementAndGet(priority);
    }
    /**
     * Moves {@code element} to {@code newPriority}.
     *
     * @return {@code true} on success; {@code false} if the element is absent, already
     *         at that priority, or was concurrently dequeued/changed.
     */
    public boolean changePriority(E element, int newPriority) {
        Integer currentPriority = this.priorityMap.get(element);
        if (currentPriority == null || currentPriority == newPriority) {
            return false;
        }
        int currentIndex = currentPriority;
        boolean removedFromQueue = this.priorities[currentIndex].remove(element);
        if (!removedFromQueue) {
            // Lost a race with dequeue()/remove(); leave state to the winner.
            return false;
        }
        this.taskCount.decrementAndGet(currentIndex);
        final boolean changeSuccess = this.priorityMap.replace(element, currentPriority, newPriority);
        if (!changeSuccess) {
            // NOTE(review): at this point the element was removed from its level queue
            // but its map entry changed concurrently — the element is not re-added here,
            // so it may be left unreachable by dequeue(); confirm callers tolerate this.
            return false;
        }
        this.priorities[newPriority].add(element);
        this.taskCount.incrementAndGet(newPriority);
        return true;
    }
    /**
     * Removes and returns the highest-priority (lowest-index) element, or {@code null}
     * if every level is empty.
     */
    public E dequeue() {
        for (int i = 0; i < this.priorities.length; i++) {
            if (this.taskCount.get(i) == 0) continue; // fast skip of empty levels
            E element = priorities[i].poll();
            if (element != null) {
                this.taskCount.decrementAndGet(i);
                this.priorityMap.remove(element);
                return element;
            }
        }
        return null;
    }
    public boolean contains(E element) {
        return priorityMap.containsKey(element);
    }
    /** Removes {@code element} if present; a no-op otherwise. */
    public void remove(E element) {
        Integer priority = this.priorityMap.remove(element);
        if (priority == null) return;
        boolean removed = this.priorities[priority].remove(element);
        if (removed) this.taskCount.decrementAndGet(priority);
    }
    /** Number of queued elements, per the tracking map. */
    public int size() {
        return priorityMap.size();
    }
    public boolean isEmpty() {
        return size() == 0;
    }
}

View File

@@ -1,27 +0,0 @@
package com.ishland.flowsched.util;
/**
 * Always-active runtime assertion helpers (unlike the {@code assert} keyword,
 * these do not require {@code -ea}).
 *
 * <p>Each check prints the stack trace before throwing so the failure remains
 * visible even if the {@link AssertionError} is swallowed upstream.
 */
public final class Assertions {
    private Assertions() {} // static utility class — not instantiable

    /**
     * @param value   condition that must hold
     * @param message detail message for the error
     * @throws AssertionError if {@code value} is false
     */
    public static void assertTrue(boolean value, String message) {
        if (!value) {
            final AssertionError error = new AssertionError(message);
            error.printStackTrace(); // keep the failure visible even if the error is caught and ignored
            throw error;
        }
    }

    /**
     * Formatting variant: {@code format}/{@code args} are rendered via
     * {@link String#format} only on failure.
     *
     * @throws AssertionError if {@code state} is false
     */
    public static void assertTrue(boolean state, String format, Object... args) {
        if (!state) {
            final AssertionError error = new AssertionError(String.format(format, args));
            error.printStackTrace();
            throw error;
        }
    }

    /**
     * @throws AssertionError (without a message) if {@code value} is false
     */
    public static void assertTrue(boolean value) {
        if (!value) {
            final AssertionError error = new AssertionError();
            error.printStackTrace();
            throw error;
        }
    }
}

View File

@@ -1,42 +0,0 @@
package org.bxteam.divinemc.region;
import ca.spottedleaf.moonrise.patches.chunk_system.storage.ChunkSystemRegionFile;
import net.minecraft.nbt.CompoundTag;
import net.minecraft.world.level.ChunkPos;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Path;

/**
 * Abstraction over a region-file backend (e.g. {@code LinearRegionFile}),
 * bridging it into the Moonrise chunk-system storage API.
 */
public interface IRegionFile extends AutoCloseable, ChunkSystemRegionFile {
    /** Path of the backing file on disk. */
    Path getPath();
    /** Forces pending chunk data to be written out. */
    void flush() throws IOException;
    /** Deletes the stored data for the chunk at {@code pos}. */
    void clear(ChunkPos pos) throws IOException;
    @Override
    void close() throws IOException;
    /** Flags the chunk at region-local (x, z) as oversized — stored outside the normal slot; TODO confirm against implementations. */
    void setOversized(int x, int z, boolean b) throws IOException;
    /** Writes the serialized chunk at {@code pos} from {@code buffer}. */
    void write(ChunkPos pos, ByteBuffer buffer) throws IOException;
    /** @return true if data for {@code pos} is present (may open/inflate lazily). */
    boolean hasChunk(ChunkPos pos);
    /** Existence check that may surface backend errors rather than swallowing them. */
    boolean doesChunkExist(ChunkPos pos) throws Exception;
    /** @return true if the chunk at region-local (x, z) is flagged oversized. */
    boolean isOversized(int x, int z);
    /** Rebuilds the file header from data; @return whether a recalculation occurred — TODO confirm semantics. */
    boolean recalculateHeader() throws IOException;
    /** Number of header recalculations performed — presumably for diagnostics; verify against callers. */
    int getRecalculateCount();
    /** Stream for writing the chunk at {@code pos}; caller must close. */
    DataOutputStream getChunkDataOutputStream(ChunkPos pos) throws IOException;
    /** Stream for reading the chunk at {@code pos}; caller must close. */
    DataInputStream getChunkDataInputStream(ChunkPos pos) throws IOException;
    /** NBT payload stored for an oversized chunk at region-local (x, z). */
    CompoundTag getOversizedData(int x, int z) throws IOException;
}

View File

@@ -1,6 +0,0 @@
package org.bxteam.divinemc.region;

/**
 * On-disk format version of the "linear" region file, matching the version byte
 * parsed in {@code LinearRegionFile} ({@code V1_VERSION}/{@code V2_VERSION}).
 */
public enum LinearImplementation {
    /** Version 1: whole region stored as a single zstd-compressed blob. */
    V1,
    /** Version 2: chunk data grouped into grid buckets with per-bucket xx hashes. */
    V2
}

View File

@@ -1,664 +0,0 @@
package org.bxteam.divinemc.region;
import ca.spottedleaf.moonrise.patches.chunk_system.io.MoonriseRegionFileIO;
import com.github.luben.zstd.ZstdInputStream;
import com.github.luben.zstd.ZstdOutputStream;
import com.mojang.logging.LogUtils;
import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;
import net.minecraft.nbt.CompoundTag;
import net.minecraft.world.level.ChunkPos;
import net.openhft.hashing.LongHashFunction;
import org.bxteam.divinemc.config.DivineConfig;
import org.bxteam.divinemc.spark.ThreadDumperRegistry;
import org.jspecify.annotations.Nullable;
import org.slf4j.Logger;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

/**
 * {@link IRegionFile} implementation for the "linear" region format. Chunks are
 * held in memory LZ4-compressed (one slot per chunk, 32x32 = 1024 slots) and
 * flushed to disk asynchronously in V1 or V2 layout (see {@link LinearImplementation}).
 */
public class LinearRegionFile implements IRegionFile {
    // Hard cap on a single chunk's serialized size (500 MiB).
    public static final int MAX_CHUNK_SIZE = 500 * 1024 * 1024;
    // Guards activeSaveThreads across all instances.
    private static final Object SAVE_LOCK = new Object();
    // Magic number at the head and foot of every linear region file.
    private static final long SUPERBLOCK = 0xc3ff13183cca9d9aL;
    private static final Logger LOGGER = LogUtils.getLogger();
    // On-disk version bytes for the two format generations.
    private static final byte V1_VERSION = 2;
    private static final byte V2_VERSION = 3;
    // V2 only: raw compressed bucket payloads as read from disk (lazily inflated).
    private byte[][] bucketBuffers;
    // Per-chunk LZ4-compressed payloads, uncompressed sizes, and timestamps (1024 slots).
    private final byte[][] chunkCompressedBuffers = new byte[1024][];
    private final int[] chunkUncompressedSizes = new int[1024];
    private final long[] chunkTimestamps = new long[1024];
    private final Object markedToSaveLock = new Object();
    // Dirty flag: set on writes, cleared (test-and-clear) by the flush scheduler.
    private boolean markedToSave = false;
    private final LZ4Compressor compressor;
    private final LZ4FastDecompressor decompressor;
    private volatile boolean regionFileOpen = false;
    // Signals the scheduling thread to stop polling.
    private volatile boolean close = false;
    private final Path regionFilePath;
    // V2 bucket grid: gridSize x gridSize buckets, each covering bucketSize chunks per axis.
    private final int gridSizeDefault = 8;
    private int gridSize = gridSizeDefault;
    private int bucketSize = 4;
    private final int compressionLevel;
    private final LinearImplementation linearImpl;
    // Background thread that periodically spawns flush workers while dirty.
    private final Thread schedulingThread;
    // Global count of in-flight flush threads; guarded by SAVE_LOCK.
    private static int activeSaveThreads = 0;
/**
 * Creates a handle for the region file at {@code path}. No disk I/O happens here:
 * reading is deferred to {@code openRegionFile()}, and the flush scheduler thread is
 * only constructed (it is started by {@code openRegionFile()}).
 *
 * @param path                 path of the region file
 * @param linearImplementation format version to use when flushing
 * @param compressionLevel     zstd compression level for on-disk data
 */
public LinearRegionFile(Path path, LinearImplementation linearImplementation, int compressionLevel) {
    this.regionFilePath = path;
    this.linearImpl = linearImplementation;
    this.compressionLevel = compressionLevel;
    this.compressor = LZ4Factory.fastestInstance().fastCompressor();
    this.decompressor = LZ4Factory.fastestInstance().fastDecompressor();
    // Polling loop: while dirty and under the flush-thread cap, spawn a worker that
    // flushes this file; then sleep for the configured delay and re-check.
    Runnable flushCheck = () -> {
        while (!close) {
            synchronized (SAVE_LOCK) {
                if (markedToSave && activeSaveThreads < DivineConfig.MiscCategory.linearFlushMaxThreads) {
                    activeSaveThreads++;
                    Runnable flushOperation = () -> {
                        try {
                            flush();
                        } catch (IOException ex) {
                            LOGGER.error("Region file {} flush failed", regionFilePath.toAbsolutePath(), ex);
                        } finally {
                            // Always give the slot back, even on failure.
                            synchronized (SAVE_LOCK) {
                                activeSaveThreads--;
                            }
                        }
                    };
                    Thread saveThread = DivineConfig.MiscCategory.linearUseVirtualThread
                        ? Thread.ofVirtual().name("Linear IO - " + this.hashCode()).unstarted(flushOperation)
                        : Thread.ofPlatform().name("Linear IO - " + this.hashCode()).unstarted(flushOperation);
                    saveThread.setPriority(Thread.NORM_PRIORITY - 3); // background work: below normal priority
                    saveThread.start();
                    ThreadDumperRegistry.REGISTRY.add(saveThread.getName()); // make it visible to spark thread dumps
                }
            }
            LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(DivineConfig.MiscCategory.linearFlushDelay));
        }
    };
    this.schedulingThread = DivineConfig.MiscCategory.linearUseVirtualThread
        ? Thread.ofVirtual().unstarted(flushCheck)
        : Thread.ofPlatform().unstarted(flushCheck);
    this.schedulingThread.setName("Linear IO Schedule - " + this.hashCode());
    ThreadDumperRegistry.REGISTRY.add(this.schedulingThread.getName());
}
/**
 * Lazily reads the region file into memory (idempotent via {@code regionFileOpen})
 * and starts the flush scheduler thread.
 *
 * <p>A missing/unreadable file is treated as an empty region. Otherwise the header
 * magic is validated and the body parsed according to its version byte.
 *
 * @throws RuntimeException on a bad superblock, unknown version, or read failure
 */
private synchronized void openRegionFile() {
    if (regionFileOpen) return;
    regionFileOpen = true;
    File file = regionFilePath.toFile();
    if (!file.canRead()) {
        // New/absent region: nothing to parse, but the flush scheduler still runs.
        schedulingThread.start();
        return;
    }
    try {
        byte[] fileContent = Files.readAllBytes(regionFilePath);
        ByteBuffer byteBuffer = ByteBuffer.wrap(fileContent);
        long superBlock = byteBuffer.getLong();
        if (superBlock != SUPERBLOCK) {
            throw new RuntimeException("Invalid superblock: " + superBlock + " file " + regionFilePath);
        }
        byte version = byteBuffer.get();
        if (version == V1_VERSION) {
            parseLinearV1(byteBuffer);
        } else if (version == V2_VERSION) {
            parseLinearV2(byteBuffer);
        } else {
            throw new RuntimeException("Invalid version: " + version + " file " + regionFilePath);
        }
        schedulingThread.start();
    } catch (IOException e) {
        throw new RuntimeException("Failed to open region file " + regionFilePath, e);
    }
}
/**
 * Parses a V1 body: a fixed 32-byte header, one zstd-compressed blob holding a
 * 1024-entry (size, padding) table followed by concatenated chunk payloads, and an
 * 8-byte footer. Each chunk is re-compressed with LZ4 for the in-memory cache.
 *
 * @param buffer file contents positioned just after the version byte
 * @throws IOException if the file length does not match the header's data count
 */
private void parseLinearV1(ByteBuffer buffer) throws IOException {
    final int HEADER_SIZE = 32;
    final int FOOTER_SIZE = 8;
    buffer.position(buffer.position() + 11); // skip unused header fields up to the data count
    int dataCount = buffer.getInt();
    long fileLength = regionFilePath.toFile().length();
    if (fileLength != HEADER_SIZE + dataCount + FOOTER_SIZE) {
        throw new IOException("Invalid file length: " + regionFilePath + " " + fileLength + " expected " + (HEADER_SIZE + dataCount + FOOTER_SIZE));
    }
    buffer.position(buffer.position() + 8); // skip remaining header bytes before the blob
    byte[] rawCompressed = new byte[dataCount];
    buffer.get(rawCompressed);
    try (ByteArrayInputStream bais = new ByteArrayInputStream(rawCompressed);
         ZstdInputStream zstdIn = new ZstdInputStream(bais)) {
        ByteBuffer decompressedBuffer = ByteBuffer.wrap(zstdIn.readAllBytes());
        // Table of 1024 (size, ignored) int pairs; size 0 means "no chunk in this slot".
        int[] starts = new int[1024];
        for (int i = 0; i < 1024; i++) {
            starts[i] = decompressedBuffer.getInt();
            decompressedBuffer.getInt(); // second int per entry is unused here
        }
        for (int i = 0; i < 1024; i++) {
            if (starts[i] > 0) {
                int size = starts[i];
                byte[] chunkData = new byte[size];
                decompressedBuffer.get(chunkData);
                // Cache the chunk LZ4-compressed, trimmed to its actual compressed length.
                int maxCompressedLength = compressor.maxCompressedLength(size);
                byte[] compressed = new byte[maxCompressedLength];
                int compressedLength = compressor.compress(chunkData, 0, size, compressed, 0, maxCompressedLength);
                byte[] finalCompressed = new byte[compressedLength];
                System.arraycopy(compressed, 0, finalCompressed, 0, compressedLength);
                chunkCompressedBuffers[i] = finalCompressed;
                chunkUncompressedSizes[i] = size;
                chunkTimestamps[i] = currentTimestamp();
            }
        }
    }
}
/**
 * Parses a V2 body: grid parameters, a chunk-existence bitmap, a zero-terminated
 * list of named features (read and discarded), per-bucket (size, compression level,
 * xx-hash) descriptors, the bucket payloads (hash-verified), and a footer magic.
 * Bucket payloads are kept raw in {@code bucketBuffers} for lazy inflation.
 *
 * @param buffer file contents positioned just after the version byte
 * @throws IOException on a bucket hash mismatch or bad footer magic
 */
private void parseLinearV2(ByteBuffer buffer) throws IOException {
    buffer.getLong(); // skipped header field (unused here)
    gridSize = buffer.get();
    if (!(gridSize == 1 || gridSize == 2 || gridSize == 4 || gridSize == 8 || gridSize == 16 || gridSize == 32)) {
        throw new RuntimeException("Invalid grid size: " + gridSize + " file " + regionFilePath);
    }
    bucketSize = 32 / gridSize; // chunks per bucket per axis (region is 32x32 chunks)
    buffer.getInt(); // skipped field
    buffer.getInt(); // skipped field
    boolean[] chunkExistenceBitmap = deserializeExistenceBitmap(buffer);
    // Named-feature list: (lengthByte, nameBytes, intValue)*, terminated by a 0 length.
    // Features are currently read and discarded.
    while (true) {
        byte featureNameLength = buffer.get();
        if (featureNameLength == 0) break;
        byte[] featureNameBytes = new byte[featureNameLength];
        buffer.get(featureNameBytes);
        String featureName = new String(featureNameBytes);
        int featureValue = buffer.getInt();
    }
    int bucketCount = gridSize * gridSize;
    int[] bucketSizes = new int[bucketCount];
    byte[] bucketCompressionLevels = new byte[bucketCount];
    long[] bucketHashes = new long[bucketCount];
    for (int i = 0; i < bucketCount; i++) {
        bucketSizes[i] = buffer.getInt();
        bucketCompressionLevels[i] = buffer.get();
        bucketHashes[i] = buffer.getLong();
    }
    bucketBuffers = new byte[bucketCount][];
    for (int i = 0; i < bucketCount; i++) {
        if (bucketSizes[i] > 0) {
            bucketBuffers[i] = new byte[bucketSizes[i]];
            buffer.get(bucketBuffers[i]);
            // Integrity check: xx-hash of the raw payload must match the descriptor.
            long rawHash = LongHashFunction.xx().hashBytes(bucketBuffers[i]);
            if (rawHash != bucketHashes[i]) {
                throw new IOException("Region file hash incorrect " + regionFilePath);
            }
        }
    }
    long footerSuperBlock = buffer.getLong();
    if (footerSuperBlock != SUPERBLOCK) {
        throw new IOException("Footer superblock invalid " + regionFilePath);
    }
}
// Flags this region as dirty so the next flush() writes it to disk.
// NOTE(review): the method-level `synchronized` (instance monitor) looks redundant
// with the dedicated markedToSaveLock monitor — confirm which one callers rely on.
private synchronized void markToSave() {
    synchronized (markedToSaveLock) {
        markedToSave = true;
    }
}
// Atomically reads and clears the dirty flag: returns true at most once per
// markToSave() call, so a flush consumes exactly one pending save request.
private synchronized boolean isMarkedToSave() {
    synchronized (markedToSaveLock) {
        final boolean wasMarked = markedToSave;
        markedToSave = false;
        return wasMarked;
    }
}
// Existence check is identical to hasChunk(); kept as a separate override
// because the interface exposes both entry points.
@Override
public synchronized boolean doesChunkExist(ChunkPos pos) {
    openRegionFile();
    return this.hasChunk(pos);
}
// A chunk is present iff its recorded uncompressed size is non-zero; the
// owning bucket must be inflated first so the size table is populated.
@Override
public synchronized boolean hasChunk(ChunkPos pos) {
    openRegionFile();
    openBucketForChunk(pos.x, pos.z);
    return chunkUncompressedSizes[getChunkIndex(pos.x, pos.z)] > 0;
}
// Persists the region if (and only if) it is dirty; isMarkedToSave() also
// clears the dirty flag, so each mark triggers at most one write-out.
@Override
public synchronized void flush() throws IOException {
    if (!isMarkedToSave()) return;
    openRegionFile();
    // Dispatch on the on-disk format version this file was opened with.
    switch (linearImpl) {
        case V1 -> flushLinearV1();
        case V2 -> flushLinearV2();
    }
}
/**
 * Writes the whole region in LINEAR v1 format to a temp file, fsyncs it, then
 * atomically replaces the live file. V1 layout: superblock, version, timestamp,
 * compression level, chunk count, zstd-blob length, reserved long, one zstd
 * stream holding the 1024-entry size/timestamp table followed by all chunk
 * bodies, and a trailing superblock.
 */
private void flushLinearV1() throws IOException {
    long timestamp = currentTimestamp();
    short chunkCount = 0;
    // Write to a sibling .tmp file so a crash never corrupts the live region.
    File tempFile = new File(regionFilePath.toString() + ".tmp");
    try (FileOutputStream fos = new FileOutputStream(tempFile);
        ByteArrayOutputStream zstdBAOS = new ByteArrayOutputStream();
        ZstdOutputStream zstdOut = new ZstdOutputStream(zstdBAOS, compressionLevel);
        DataOutputStream zstdDataOut = new DataOutputStream(zstdOut);
        DataOutputStream fileDataOut = new DataOutputStream(fos)) {
        fileDataOut.writeLong(SUPERBLOCK);
        fileDataOut.writeByte(V1_VERSION);
        fileDataOut.writeLong(timestamp);
        fileDataOut.writeByte(compressionLevel);
        // Chunks are cached LZ4-style compressed in memory; re-inflate them so
        // the file payload is raw chunk data inside a single zstd stream.
        ArrayList<byte[]> decompressedChunks = new ArrayList<>(1024);
        for (int i = 0; i < 1024; i++) {
            if (chunkUncompressedSizes[i] != 0) {
                chunkCount++;
                byte[] decompressed = new byte[chunkUncompressedSizes[i]];
                decompressor.decompress(chunkCompressedBuffers[i], 0, decompressed, 0, chunkUncompressedSizes[i]);
                decompressedChunks.add(decompressed);
            } else {
                decompressedChunks.add(null);
            }
        }
        // Header table inside the zstd stream: size + timestamp per slot.
        for (int i = 0; i < 1024; i++) {
            zstdDataOut.writeInt(chunkUncompressedSizes[i]);
            zstdDataOut.writeInt((int) chunkTimestamps[i]); // v1 stores 32-bit timestamps
        }
        for (int i = 0; i < 1024; i++) {
            if (decompressedChunks.get(i) != null) {
                zstdDataOut.write(decompressedChunks.get(i));
            }
        }
        zstdDataOut.close(); // finish the zstd frame before reading zstdBAOS
        fileDataOut.writeShort(chunkCount);
        byte[] compressedZstdData = zstdBAOS.toByteArray();
        fileDataOut.writeInt(compressedZstdData.length);
        fileDataOut.writeLong(0); // reserved field
        fileDataOut.write(compressedZstdData);
        fileDataOut.writeLong(SUPERBLOCK);
        fileDataOut.flush();
        // Force data and metadata to disk before the rename below.
        fos.getFD().sync();
        fos.getChannel().force(true);
    }
    Files.move(tempFile.toPath(), regionFilePath, StandardCopyOption.REPLACE_EXISTING);
}
/**
 * Writes the region in LINEAR v2 (bucketed) format via a temp file + atomic
 * rename. Mirrors the layout consumed by parseLinearV2: header, existence
 * bitmap, NBT feature list, per-bucket size/level/xxHash table, bucket
 * payloads, trailing superblock.
 */
private void flushLinearV2() throws IOException {
    long timestamp = currentTimestamp();
    File tempFile = new File(regionFilePath.toString() + ".tmp");
    try (FileOutputStream fos = new FileOutputStream(tempFile);
        DataOutputStream dataOut = new DataOutputStream(fos)) {
        dataOut.writeLong(SUPERBLOCK);
        dataOut.writeByte(V2_VERSION);
        dataOut.writeLong(timestamp);
        dataOut.writeByte(gridSize);
        // Region coordinates are recovered from the "r.<x>.<z>.<ext>" file name.
        int[] regionCoords = parseRegionCoordinates(regionFilePath.getFileName().toString());
        dataOut.writeInt(regionCoords[0]);
        dataOut.writeInt(regionCoords[1]);
        // Bitmap marks which of the 1024 slots hold a chunk.
        boolean[] chunkExistence = new boolean[1024];
        for (int i = 0; i < 1024; i++) {
            chunkExistence[i] = (chunkUncompressedSizes[i] > 0);
        }
        writeExistenceBitmap(dataOut, chunkExistence);
        writeNBTFeatures(dataOut);
        byte[][] buckets = buildBuckets();
        // Metadata table first (size, compression level, xxHash), then payloads,
        // matching the read order in parseLinearV2.
        int bucketCount = gridSize * gridSize;
        for (int i = 0; i < bucketCount; i++) {
            dataOut.writeInt(buckets[i] != null ? buckets[i].length : 0);
            dataOut.writeByte(compressionLevel);
            long bucketHash = buckets[i] != null ? LongHashFunction.xx().hashBytes(buckets[i]) : 0;
            dataOut.writeLong(bucketHash);
        }
        for (int i = 0; i < bucketCount; i++) {
            if (buckets[i] != null) {
                dataOut.write(buckets[i]);
            }
        }
        dataOut.writeLong(SUPERBLOCK);
        dataOut.flush();
        // fsync before the atomic replace so a crash cannot leave a torn file.
        fos.getFD().sync();
        fos.getChannel().force(true);
    }
    Files.move(tempFile.toPath(), regionFilePath, StandardCopyOption.REPLACE_EXISTING);
}
// Writes an empty NBT-feature list: a single zero byte is the list terminator
// recognized by parseLinearV2 (no features are currently emitted).
private void writeNBTFeatures(DataOutputStream dataOut) throws IOException {
    dataOut.writeByte(0);
}
/**
 * Assembles the gridSize x gridSize bucket payloads for a v2 flush. Buckets
 * still compressed from load (never opened) are reused verbatim; otherwise the
 * bucket's chunks are re-encoded into a fresh zstd stream of
 * [length+8][timestamp][chunk bytes] cells (length 0 marks an empty cell).
 * Returns null entries for buckets that contain no chunk data at all.
 */
private byte[][] buildBuckets() throws IOException {
    int bucketCount = gridSize * gridSize;
    byte[][] buckets = new byte[bucketCount][];
    for (int bx = 0; bx < gridSize; bx++) {
        for (int bz = 0; bz < gridSize; bz++) {
            int bucketIdx = bx * gridSize + bz;
            // Untouched bucket: its original compressed payload is still valid.
            if (bucketBuffers != null && bucketBuffers[bucketIdx] != null) {
                buckets[bucketIdx] = bucketBuffers[bucketIdx];
                continue;
            }
            try (ByteArrayOutputStream bucketBAOS = new ByteArrayOutputStream();
                ZstdOutputStream bucketZstdOut = new ZstdOutputStream(bucketBAOS, compressionLevel);
                DataOutputStream bucketDataOut = new DataOutputStream(bucketZstdOut)) {
                boolean hasData = false;
                int cellCount = 32 / gridSize; // chunks per bucket edge
                for (int cx = 0; cx < cellCount; cx++) {
                    for (int cz = 0; cz < cellCount; cz++) {
                        // Region-local slot index for this cell (x + z * 32).
                        int chunkIndex = (bx * cellCount + cx) + (bz * cellCount + cz) * 32;
                        if (chunkUncompressedSizes[chunkIndex] > 0) {
                            hasData = true;
                            byte[] chunkData = new byte[chunkUncompressedSizes[chunkIndex]];
                            decompressor.decompress(chunkCompressedBuffers[chunkIndex], 0, chunkData, 0, chunkUncompressedSizes[chunkIndex]);
                            // Stored length includes the 8-byte timestamp that follows.
                            bucketDataOut.writeInt(chunkData.length + 8);
                            bucketDataOut.writeLong(chunkTimestamps[chunkIndex]);
                            bucketDataOut.write(chunkData);
                        } else {
                            bucketDataOut.writeInt(0);
                            bucketDataOut.writeLong(chunkTimestamps[chunkIndex]);
                        }
                    }
                }
                bucketDataOut.close(); // finish the zstd frame before reading bucketBAOS
                if (hasData) {
                    buckets[bucketIdx] = bucketBAOS.toByteArray();
                }
            }
        }
    }
    return buckets;
}
/**
 * Lazily inflates the bucket containing (chunkX, chunkZ): decodes its zstd
 * payload into per-chunk compressed buffers, sizes, and timestamps, then drops
 * the raw bucket so it is decoded at most once. No-op if the bucket was
 * already opened (or this file is v1, which has no buckets).
 */
private void openBucketForChunk(int chunkX, int chunkZ) {
    int modX = Math.floorMod(chunkX, 32);
    int modZ = Math.floorMod(chunkZ, 32);
    int bucketIdx = chunkToBucketIndex(modX, modZ);
    if (bucketBuffers == null || bucketBuffers[bucketIdx] == null) {
        return; // nothing to inflate
    }
    try (ByteArrayInputStream bucketBAIS = new ByteArrayInputStream(bucketBuffers[bucketIdx]);
        ZstdInputStream bucketZstdIn = new ZstdInputStream(bucketBAIS)) {
        ByteBuffer bucketBuffer = ByteBuffer.wrap(bucketZstdIn.readAllBytes());
        int cellsPerBucket = 32 / gridSize;
        int bx = modX / bucketSize, bz = modZ / bucketSize;
        // Cell order must match the write order in buildBuckets().
        for (int cx = 0; cx < cellsPerBucket; cx++) {
            for (int cz = 0; cz < cellsPerBucket; cz++) {
                int chunkIndex = (bx * cellsPerBucket + cx) + (bz * cellsPerBucket + cz) * 32;
                int chunkSize = bucketBuffer.getInt();
                long timestamp = bucketBuffer.getLong();
                chunkTimestamps[chunkIndex] = timestamp;
                if (chunkSize > 0) {
                    // Stored length counts the 8-byte timestamp already consumed above.
                    byte[] chunkData = new byte[chunkSize - 8];
                    bucketBuffer.get(chunkData);
                    // Re-compress into the in-memory cache format.
                    int maxCompressedLength = compressor.maxCompressedLength(chunkData.length);
                    byte[] compressed = new byte[maxCompressedLength];
                    int compressedLength = compressor.compress(chunkData, 0, chunkData.length, compressed, 0, maxCompressedLength);
                    byte[] finalCompressed = new byte[compressedLength];
                    System.arraycopy(compressed, 0, finalCompressed, 0, compressedLength);
                    chunkCompressedBuffers[chunkIndex] = finalCompressed;
                    chunkUncompressedSizes[chunkIndex] = chunkData.length;
                }
            }
        }
    } catch (IOException ex) {
        throw new RuntimeException("Region file corrupted: " + regionFilePath + " bucket: " + bucketIdx, ex);
    }
    // Mark the bucket as consumed so it is not decoded again (and is rebuilt on flush).
    bucketBuffers[bucketIdx] = null;
}
/**
 * Stores the serialized chunk held in {@code buffer} (its readable window,
 * i.e. position..limit) into the in-memory cache, compressed, and marks the
 * region dirty. Oversized chunks are rejected and the slot is cleared instead.
 *
 * Fix: the previous code read {@code buffer.array()}, which returns the whole
 * backing array and ignores the buffer's position/limit — for a window-wrapped
 * buffer (see ChunkBuffer.close(), which wraps {@code buf, 0, count}) that
 * appended stale bytes past the limit to the stored chunk.
 */
@Override
public synchronized void write(ChunkPos pos, ByteBuffer buffer) {
    openRegionFile();
    openBucketForChunk(pos.x, pos.z);
    // Copy exactly the readable window; duplicate() leaves the caller's
    // position/limit untouched.
    byte[] rawData = new byte[buffer.remaining()];
    buffer.duplicate().get(rawData);
    int uncompressedSize = rawData.length;
    if (uncompressedSize > MAX_CHUNK_SIZE) {
        LOGGER.error("Chunk dupe attempt {}", regionFilePath);
        clear(pos);
    } else {
        int maxCompressedLength = compressor.maxCompressedLength(uncompressedSize);
        byte[] compressed = new byte[maxCompressedLength];
        int compressedLength = compressor.compress(rawData, 0, uncompressedSize, compressed, 0, maxCompressedLength);
        // Trim the scratch buffer down to the actual compressed length.
        byte[] finalCompressed = new byte[compressedLength];
        System.arraycopy(compressed, 0, finalCompressed, 0, compressedLength);
        int index = getChunkIndex(pos.x, pos.z);
        chunkCompressedBuffers[index] = finalCompressed;
        chunkTimestamps[index] = currentTimestamp();
        chunkUncompressedSizes[index] = uncompressedSize;
    }
    markToSave();
}
// Returns a stream that buffers the serialized chunk in memory; the data is
// committed to this region via write() when the stream is closed.
@Override
public DataOutputStream getChunkDataOutputStream(ChunkPos pos) {
    openRegionFile();
    openBucketForChunk(pos.x, pos.z);
    ChunkBuffer sink = new ChunkBuffer(pos);
    return new DataOutputStream(new BufferedOutputStream(sink));
}
/**
 * Moonrise chunk-system hook: hands back a WriteData wrapping a fresh chunk
 * output stream; the supplied callback closes that stream once the region IO
 * layer finishes writing {@code data} into it.
 */
@Override
public MoonriseRegionFileIO.RegionDataController.WriteData moonrise$startWrite(CompoundTag data, ChunkPos pos) {
    DataOutputStream out = getChunkDataOutputStream(pos);
    return new ca.spottedleaf.moonrise.patches.chunk_system.io.MoonriseRegionFileIO.RegionDataController.WriteData(
        data,
        ca.spottedleaf.moonrise.patches.chunk_system.io.MoonriseRegionFileIO.RegionDataController.WriteData.WriteResult.WRITE,
        out,
        regionFile -> {
            try {
                out.close(); // flushes the buffered chunk into write()
            } catch (IOException e) {
                LOGGER.error("Failed to close region file stream", e);
            }
        }
    );
}
// Byte sink handed out by getChunkDataOutputStream(); closing it commits the
// buffered bytes to this region file via write().
private class ChunkBuffer extends ByteArrayOutputStream {
    private final ChunkPos pos; // chunk this buffer belongs to
    public ChunkBuffer(ChunkPos pos) {
        super();
        this.pos = pos;
    }
    @Override
    public void close() {
        // Wrap only the written window [0, count) of the backing array.
        ByteBuffer byteBuffer = ByteBuffer.wrap(this.buf, 0, this.count);
        LinearRegionFile.this.write(this.pos, byteBuffer);
    }
}
/**
 * Drains {@code in} to EOF and returns the bytes read. The stream is not
 * closed here — callers own its lifecycle.
 *
 * Uses InputStream.readAllBytes() (Java 9+) instead of the previous manual
 * 4096-byte copy loop; identical result, less code.
 */
private byte[] toByteArray(InputStream in) throws IOException {
    return in.readAllBytes();
}
/**
 * Returns a stream over the chunk's uncompressed bytes, or null when the slot
 * holds no chunk (uncompressed size 0). Data is inflated from the in-memory
 * compressed cache on every call.
 */
@Nullable
@Override
public synchronized DataInputStream getChunkDataInputStream(ChunkPos pos) {
    openRegionFile();
    openBucketForChunk(pos.x, pos.z);
    int index = getChunkIndex(pos.x, pos.z);
    if (chunkUncompressedSizes[index] != 0) {
        byte[] decompressed = new byte[chunkUncompressedSizes[index]];
        decompressor.decompress(chunkCompressedBuffers[index], 0, decompressed, 0, chunkUncompressedSizes[index]);
        return new DataInputStream(new ByteArrayInputStream(decompressed));
    }
    return null;
}
// Drops all cached state for the chunk slot and marks the region dirty so the
// removal is persisted on the next flush.
@Override
public synchronized void clear(ChunkPos pos) {
    openRegionFile();
    openBucketForChunk(pos.x, pos.z);
    final int slot = getChunkIndex(pos.x, pos.z);
    chunkCompressedBuffers[slot] = null;
    chunkUncompressedSizes[slot] = 0;
    chunkTimestamps[slot] = 0;
    markToSave();
}
/**
 * Closes the region file: marks it closed, then performs a final flush.
 * NOTE(review): the `close` flag is set before flushing — presumably consulted
 * elsewhere to stop further scheduling; confirm against the fields/openRegionFile
 * logic outside this view.
 *
 * @throws IOException wrapping any flush failure with the file path for context
 */
@Override
public synchronized void close() throws IOException {
    openRegionFile();
    close = true;
    try {
        flush();
    } catch (IOException e) {
        throw new IOException("Region flush IOException " + e + " " + regionFilePath, e);
    }
}
// Packs intra-region coordinates (x mod 32, z mod 32) into a slot in [0, 1024).
// The low 5 bits hold x and the next 5 hold z, so OR-ing is equivalent to the
// additive form (the bit ranges are disjoint).
private static int getChunkIndex(int x, int z) {
    return (x & 31) | ((z & 31) << 5);
}
// Whole seconds since the Unix epoch, truncated to int (matches the v1
// on-disk timestamp width).
private static int currentTimestamp() {
    final long epochSeconds = System.currentTimeMillis() / 1000L;
    return (int) epochSeconds;
}
// --- Interface stubs -------------------------------------------------------
// The LINEAR format has no header to recalculate and no oversized-chunk side
// files (chunks of any size live inline), so these overrides are inert.

// LINEAR files carry no Anvil-style header; nothing to recalculate.
@Override
public boolean recalculateHeader() {
    return false;
}
@Override
public int getRecalculateCount() {
    return 0;
}
// Oversized chunks are stored inline; flag is meaningless here.
@Override
public void setOversized(int x, int z, boolean something) {
    // stub
}
// Never reachable in normal operation since isOversized() is always false.
@Override
public CompoundTag getOversizedData(int x, int z) throws IOException {
    throw new IOException("getOversizedData is a stub " + regionFilePath);
}
@Override
public boolean isOversized(int x, int z) {
    return false;
}
// Path of the backing .linear file.
@Override
public Path getPath() {
    return regionFilePath;
}
// Reads 128 bytes (1024 bits, MSB first) into a per-slot existence array:
// bit 7 of byte 0 corresponds to slot 0, bit 6 to slot 1, and so on.
private boolean[] deserializeExistenceBitmap(ByteBuffer buffer) {
    boolean[] present = new boolean[1024];
    for (int byteIdx = 0; byteIdx < 128; byteIdx++) {
        final int bits = buffer.get() & 0xFF;
        for (int bit = 0; bit < 8; bit++) {
            present[byteIdx * 8 + bit] = (bits & (0x80 >>> bit)) != 0;
        }
    }
    return present;
}
// Packs the 1024-slot existence array into 128 bytes, MSB first — the exact
// inverse of deserializeExistenceBitmap.
private void writeExistenceBitmap(DataOutputStream out, boolean[] bitmap) throws IOException {
    for (int byteIdx = 0; byteIdx < 128; byteIdx++) {
        int packed = 0;
        for (int bit = 0; bit < 8; bit++) {
            if (bitmap[byteIdx * 8 + bit]) {
                packed |= 0x80 >>> bit;
            }
        }
        out.writeByte(packed);
    }
}
// Maps intra-region chunk coordinates (already reduced mod 32) to the index
// of the bucket cell that owns them in the gridSize x gridSize layout.
private int chunkToBucketIndex(int chunkX, int chunkZ) {
    final int bucketX = chunkX / bucketSize;
    final int bucketZ = chunkZ / bucketSize;
    return bucketX * gridSize + bucketZ;
}
// Extracts {x, z} from a region file name of the form "r.<x>.<z>.<ext>".
// Unparseable coordinates are left at 0 (a coordinate parsed before the
// failure is kept), and malformed names fall back to {0, 0} with a warning.
private int[] parseRegionCoordinates(String fileName) {
    final String[] tokens = fileName.split("\\.");
    if (tokens.length < 4) {
        LOGGER.warn("Unexpected file name format: {}", fileName);
        return new int[]{0, 0};
    }
    int regionX = 0;
    int regionZ = 0;
    try {
        regionX = Integer.parseInt(tokens[1]);
        regionZ = Integer.parseInt(tokens[2]);
    } catch (NumberFormatException e) {
        LOGGER.error("Failed to parse region coordinates from file name: {}", fileName, e);
    }
    return new int[]{regionX, regionZ};
}
}

View File

@@ -1,34 +0,0 @@
package org.bxteam.divinemc.region;
import net.minecraft.world.level.chunk.storage.RegionFile;
import net.minecraft.world.level.chunk.storage.RegionFileVersion;
import net.minecraft.world.level.chunk.storage.RegionStorageInfo;
import org.bxteam.divinemc.config.DivineConfig;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import java.io.IOException;
import java.nio.file.Path;
/**
 * Chooses the region-file implementation by file extension: ".linear" files
 * get a LinearRegionFile, everything else the vanilla Anvil RegionFile.
 */
public class RegionFileFactory {
    // Convenience overload: uses the globally configured compression format.
    @Contract("_, _, _, _ -> new")
    public static @NotNull IRegionFile getAbstractRegionFile(RegionStorageInfo storageKey, Path directory, Path path, boolean dsync) throws IOException {
        // NOTE(review): this passes (directory, path) into an overload whose
        // parameters are declared (path, directory) — the names appear swapped
        // between the two signatures. Callers seem to compensate, but confirm
        // which argument is the region file vs. its parent directory.
        return getAbstractRegionFile(storageKey, directory, path, RegionFileVersion.getCompressionFormat(), dsync);
    }
    // Full overload: `path` is used as the region file (its extension selects
    // the implementation); `directory` is forwarded to the Anvil constructor.
    @Contract("_, _, _, _, _ -> new")
    public static @NotNull IRegionFile getAbstractRegionFile(RegionStorageInfo storageKey, @NotNull Path path, Path directory, RegionFileVersion compressionFormat, boolean dsync) throws IOException {
        final String fullFileName = path.getFileName().toString();
        final String[] fullNameSplit = fullFileName.split("\\.");
        final String extensionName = fullNameSplit[fullNameSplit.length - 1];
        switch (RegionFileFormat.fromExtension(extensionName)) {
            case LINEAR -> {
                return new LinearRegionFile(path, DivineConfig.MiscCategory.linearImplementation, DivineConfig.MiscCategory.linearCompressionLevel);
            }
            default -> {
                return new RegionFile(storageKey, path, directory, compressionFormat, dsync);
            }
        }
    }
}

View File

@@ -1,55 +0,0 @@
package org.bxteam.divinemc.region;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import java.util.Locale;
/**
 * Supported region-file storage formats, keyed by file extension.
 *
 * Fix: fromExtension() previously used the default-locale toLowerCase(), which
 * breaks matching under locales with non-standard case folding (e.g. Turkish
 * dotless i turns "LINEAR" into "lınear"). Now uses Locale.ROOT, consistent
 * with fromName().
 */
public enum RegionFileFormat {
    LINEAR(".linear"),
    ANVIL(".mca"),
    UNKNOWN(null); // sentinel for unrecognized extensions

    private final String extension; // dotted extension, null for UNKNOWN

    RegionFileFormat(String extension) {
        this.extension = extension;
    }

    /** Returns the dotted file extension for this format, or null for UNKNOWN. */
    public String getExtensionName() {
        return this.extension;
    }

    /**
     * Resolves a format from a (case-insensitive) configuration name.
     *
     * @throws IllegalArgumentException for names that match no known format
     */
    @Contract(pure = true)
    public static RegionFileFormat fromName(@NotNull String name) {
        switch (name.toUpperCase(Locale.ROOT)) {
            case "MCA", "ANVIL" -> {
                return ANVIL;
            }
            case "LINEAR" -> {
                return LINEAR;
            }
            default -> {
                throw new IllegalArgumentException("Unknown region file format: " + name);
            }
        }
    }

    /**
     * Resolves a format from a file extension (without the dot), returning
     * UNKNOWN rather than throwing for unrecognized values.
     */
    @Contract(pure = true)
    public static RegionFileFormat fromExtension(@NotNull String name) {
        switch (name.toLowerCase(Locale.ROOT)) {
            case "mca", "anvil" -> {
                return ANVIL;
            }
            case "linear" -> {
                return LINEAR;
            }
            default -> {
                return UNKNOWN;
            }
        }
    }
}

View File

@@ -1,404 +0,0 @@
package org.bxteam.divinemc.server.chunk;
import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;
import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
import ca.spottedleaf.concurrentutil.util.Priority;
import java.lang.invoke.VarHandle;
import java.util.Comparator;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
 * A PrioritisedExecutor backed by a ConcurrentSkipListMap ordered by
 * (priority, subOrder, id), bridging queued tasks into a TheChunkSystem
 * scheduler. Tasks are immutable in the map: every priority/subOrder change
 * removes the old Holder and inserts a fresh one, so ordering keys never
 * mutate while resident in the skip list.
 */
public final class ChunkSystemTaskQueue implements PrioritisedExecutor {
    // Monotonic id used as the final ordering tie-breaker.
    private final AtomicLong taskIdGenerator = new AtomicLong();
    private final AtomicLong scheduledTasks = new AtomicLong();
    private final AtomicLong executedTasks = new AtomicLong();
    private final AtomicLong subOrderGenerator = new AtomicLong();
    private final AtomicBoolean shutdown = new AtomicBoolean();
    // Ordered task set; the Boolean value is a placeholder (map used as a sorted set).
    private final ConcurrentSkipListMap<ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder, Boolean> tasks = new ConcurrentSkipListMap<>(ChunkSystemTaskQueue.PrioritisedQueuedTask.COMPARATOR);
    private final TheChunkSystem chunkSystem;
    public ChunkSystemTaskQueue(TheChunkSystem chunkSystem) {
        this.chunkSystem = chunkSystem;
    }
    @Override
    public long getTotalTasksScheduled() {
        return this.scheduledTasks.get();
    }
    @Override
    public long getTotalTasksExecuted() {
        return this.executedTasks.get();
    }
    @Override
    public long generateNextSubOrder() {
        return this.subOrderGenerator.getAndIncrement();
    }
    // Returns true only on the first shutdown call.
    @Override
    public boolean shutdown() {
        return !this.shutdown.getAndSet(true);
    }
    @Override
    public boolean isShutdown() {
        return this.shutdown.get();
    }
    // Polls the highest-priority holder and runs it; skips holders whose task
    // was already completed/cancelled (execute() returned false) and keeps
    // polling until one task actually runs or the map is empty.
    // NOTE(review): queue() schedules directly into chunkSystem and does not
    // insert into `tasks`; only priority/subOrder updates repopulate the map —
    // confirm this drain path is intentional.
    @Override
    public boolean executeTask() {
        for (; ; ) {
            final Map.Entry<ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder, Boolean> firstEntry = this.tasks.pollFirstEntry();
            if (firstEntry != null) {
                final ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder task = firstEntry.getKey();
                task.markRemoved();
                if (!task.task.execute()) {
                    continue;
                }
                return true;
            }
            return false;
        }
    }
    @Override
    public PrioritisedTask createTask(final Runnable task) {
        return this.createTask(task, Priority.NORMAL, this.generateNextSubOrder());
    }
    @Override
    public PrioritisedTask createTask(final Runnable task, final Priority priority) {
        return this.createTask(task, priority, this.generateNextSubOrder());
    }
    @Override
    public PrioritisedTask createTask(final Runnable task, final Priority priority, final long subOrder) {
        return new ChunkSystemTaskQueue.PrioritisedQueuedTask(task, priority, subOrder);
    }
    @Override
    public PrioritisedTask queueTask(final Runnable task) {
        return this.queueTask(task, Priority.NORMAL, this.generateNextSubOrder());
    }
    @Override
    public PrioritisedTask queueTask(final Runnable task, final Priority priority) {
        return this.queueTask(task, priority, this.generateNextSubOrder());
    }
    @Override
    public PrioritisedTask queueTask(final Runnable task, final Priority priority, final long subOrder) {
        final ChunkSystemTaskQueue.PrioritisedQueuedTask ret = new ChunkSystemTaskQueue.PrioritisedQueuedTask(task, priority, subOrder);
        ret.queue();
        return ret;
    }
    /**
     * A queued task. All mutable state (priority, subOrder, holder) is guarded
     * by `synchronized (this)`. Priority.COMPLETING doubles as the terminal
     * state for both execution and cancellation.
     */
    private final class PrioritisedQueuedTask implements PrioritisedExecutor.PrioritisedTask {
        // Orders by priority, then subOrder, then creation id (all ascending).
        public static final Comparator<ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder> COMPARATOR = (final ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder t1, final ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder t2) -> {
            final int priorityCompare = t1.priority - t2.priority;
            if (priorityCompare != 0) {
                return priorityCompare;
            }
            final int subOrderCompare = Long.compare(t1.subOrder, t2.subOrder);
            if (subOrderCompare != 0) {
                return subOrderCompare;
            }
            return Long.compare(t1.id, t2.id);
        };
        private final long id;
        private final Runnable execute;
        private Priority priority;      // COMPLETING once executed or cancelled
        private long subOrder;
        // Immutable snapshot of the ordering key while in the map; replaced
        // (never mutated) on every priority/subOrder change.
        private ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder holder;
        public PrioritisedQueuedTask(final Runnable execute, final Priority priority, final long subOrder) {
            if (!Priority.isValidPriority(priority)) {
                throw new IllegalArgumentException("Invalid priority " + priority);
            }
            this.execute = execute;
            this.priority = priority;
            this.subOrder = subOrder;
            this.id = ChunkSystemTaskQueue.this.taskIdGenerator.getAndIncrement();
        }
        @Override
        public PrioritisedExecutor getExecutor() {
            return ChunkSystemTaskQueue.this;
        }
        // Hands the task to the chunk system scheduler. The post-synchronized
        // shutdown re-check cancels tasks that raced a concurrent shutdown.
        @Override
        public boolean queue() {
            synchronized (this) {
                if (this.holder != null || this.priority == Priority.COMPLETING) {
                    return false; // already queued, executed, or cancelled
                }
                if (ChunkSystemTaskQueue.this.isShutdown()) {
                    throw new IllegalStateException("Queue is shutdown");
                }
                this.holder = new Holder(this, this.priority.priority, this.subOrder, this.id);
                ChunkSystemTaskQueue.this.scheduledTasks.getAndIncrement();
                ChunkSystemTaskQueue.this.chunkSystem.schedule(this.holder.task.execute, this.holder.task.priority.priority);
            }
            if (ChunkSystemTaskQueue.this.isShutdown()) {
                this.cancel();
                throw new IllegalStateException("Queue is shutdown");
            }
            return true;
        }
        @Override
        public boolean isQueued() {
            synchronized (this) {
                return this.holder != null && this.priority != Priority.COMPLETING;
            }
        }
        // Marks COMPLETING and removes the holder from the map (if this thread
        // wins the markRemoved race); counts the task as executed.
        @Override
        public boolean cancel() {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING) {
                    return false;
                }
                this.priority = Priority.COMPLETING;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    ChunkSystemTaskQueue.this.executedTasks.getAndIncrement();
                }
                return true;
            }
        }
        // Runs the task body outside the lock; returns false if it had already
        // completed or been cancelled.
        @Override
        public boolean execute() {
            final boolean increaseExecuted;
            synchronized (this) {
                if (this.priority == Priority.COMPLETING) {
                    return false;
                }
                this.priority = Priority.COMPLETING;
                if (increaseExecuted = (this.holder != null)) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                }
            }
            try {
                this.execute.run();
                return true;
            } finally {
                if (increaseExecuted) {
                    ChunkSystemTaskQueue.this.executedTasks.getAndIncrement();
                }
            }
        }
        @Override
        public Priority getPriority() {
            synchronized (this) {
                return this.priority;
            }
        }
        // The set/raise/lower mutators below all follow the same pattern:
        // reject if COMPLETING or no-op, update the field, then (if queued)
        // swap the map entry for a Holder with the new ordering key.
        @Override
        public boolean setPriority(final Priority priority) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || this.priority == priority) {
                    return false;
                }
                this.priority = priority;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        @Override
        public boolean raisePriority(final Priority priority) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || this.priority.isHigherOrEqualPriority(priority)) {
                    return false;
                }
                this.priority = priority;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        @Override
        public boolean lowerPriority(Priority priority) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || this.priority.isLowerOrEqualPriority(priority)) {
                    return false;
                }
                this.priority = priority;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        @Override
        public long getSubOrder() {
            synchronized (this) {
                return this.subOrder;
            }
        }
        @Override
        public boolean setSubOrder(final long subOrder) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || this.subOrder == subOrder) {
                    return false;
                }
                this.subOrder = subOrder;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        @Override
        public boolean raiseSubOrder(long subOrder) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || this.subOrder >= subOrder) {
                    return false;
                }
                this.subOrder = subOrder;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        @Override
        public boolean lowerSubOrder(final long subOrder) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || this.subOrder <= subOrder) {
                    return false;
                }
                this.subOrder = subOrder;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        @Override
        public boolean setPriorityAndSubOrder(final Priority priority, final long subOrder) {
            synchronized (this) {
                if (this.priority == Priority.COMPLETING || (this.priority == priority && this.subOrder == subOrder)) {
                    return false;
                }
                this.priority = priority;
                this.subOrder = subOrder;
                if (this.holder != null) {
                    if (this.holder.markRemoved()) {
                        ChunkSystemTaskQueue.this.tasks.remove(this.holder);
                    }
                    this.holder = new ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder(this, priority.priority, this.subOrder, this.id);
                    ChunkSystemTaskQueue.this.tasks.put(this.holder, Boolean.TRUE);
                }
                return true;
            }
        }
        /**
         * Immutable snapshot of a task's ordering key for use as a skip-list
         * key. The `removed` flag (via VarHandle getAndSet) ensures exactly
         * one thread wins the right to remove the holder from the map.
         */
        private static final class Holder {
            private static final VarHandle REMOVED_HANDLE = ConcurrentUtil.getVarHandle(ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder.class, "removed", boolean.class);
            private final ChunkSystemTaskQueue.PrioritisedQueuedTask task;
            private final int priority;
            private final long subOrder;
            private final long id;
            private volatile boolean removed;
            private Holder(
                final ChunkSystemTaskQueue.PrioritisedQueuedTask task, final int priority, final long subOrder,
                final long id
            ) {
                this.task = task;
                this.priority = priority;
                this.subOrder = subOrder;
                this.id = id;
            }
            // Returns true for exactly one caller — the winner performs the map removal.
            public boolean markRemoved() {
                return !(boolean) REMOVED_HANDLE.getAndSet((ChunkSystemTaskQueue.PrioritisedQueuedTask.Holder) this, (boolean) true);
            }
        }
    }
}

View File

@@ -1,355 +0,0 @@
package org.bxteam.divinemc.server.chunk;
import ca.spottedleaf.concurrentutil.executor.PrioritisedExecutor;
import ca.spottedleaf.concurrentutil.util.Priority;
import com.ishland.flowsched.executor.ExecutorManager;
import org.bxteam.divinemc.util.ThreadBuilder;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;
public class TheChunkSystem extends ExecutorManager {
protected final Logger LOGGER = LoggerFactory.getLogger("TheChunkSystem");
private final TheChunkSystem.COWArrayList<TheChunkSystem.ExecutorGroup> executors = new TheChunkSystem.COWArrayList<>(TheChunkSystem.ExecutorGroup.class);
private boolean shutdown;
public TheChunkSystem(final int workerThreadCount, final ThreadBuilder threadInitializer) {
super(workerThreadCount, threadInitializer);
LOGGER.info("Initialized new ChunkSystem with {} allocated threads", workerThreadCount);
}
@Override
public void shutdown() {
synchronized (this) {
this.shutdown = true;
}
super.shutdown();
this.wakeup();
for (final TheChunkSystem.ExecutorGroup group : this.executors.getArray()) {
for (final TheChunkSystem.ExecutorGroup.ThreadPoolExecutor executor : group.executors.getArray()) {
executor.shutdown();
}
}
LOGGER.info("ChunkSystem shutdown complete");
}
private void notifyAllThreads() {
this.wakeup();
}
public TheChunkSystem.ExecutorGroup createExecutorGroup() {
synchronized (this) {
if (this.shutdown) {
throw new IllegalStateException("Queue is shutdown: " + this);
}
final TheChunkSystem.ExecutorGroup ret = new TheChunkSystem.ExecutorGroup();
this.executors.add(ret);
return ret;
}
}
private static final class COWArrayList<E> {
private volatile E[] array;
public COWArrayList(final Class<E> clazz) {
this.array = (E[]) Array.newInstance(clazz, 0);
}
public E[] getArray() {
return this.array;
}
public void add(final E element) {
synchronized (this) {
final E[] array = this.array;
final E[] copy = Arrays.copyOf(array, array.length + 1);
copy[array.length] = element;
this.array = copy;
}
}
public boolean remove(final E element) {
synchronized (this) {
final E[] array = this.array;
int index = -1;
for (int i = 0, len = array.length; i < len; ++i) {
if (array[i] == element) {
index = i;
break;
}
}
if (index == -1) {
return false;
}
final E[] copy = (E[]) Array.newInstance(array.getClass().getComponentType(), array.length - 1);
System.arraycopy(array, 0, copy, 0, index);
System.arraycopy(array, index + 1, copy, index, (array.length - 1) - index);
this.array = copy;
}
return true;
}
}
public final class ExecutorGroup {
private final AtomicLong subOrderGenerator = new AtomicLong();
private final TheChunkSystem.COWArrayList<TheChunkSystem.ExecutorGroup.ThreadPoolExecutor> executors = new TheChunkSystem.COWArrayList<>(TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.class);
private ExecutorGroup() { }
public TheChunkSystem.ExecutorGroup.ThreadPoolExecutor[] getAllExecutors() {
return this.executors.getArray().clone();
}
private TheChunkSystem getThreadPool() {
return TheChunkSystem.this;
}
public TheChunkSystem.ExecutorGroup.@NotNull ThreadPoolExecutor createExecutor() {
synchronized (TheChunkSystem.this) {
if (TheChunkSystem.this.shutdown) {
throw new IllegalStateException("Queue is shutdown: " + TheChunkSystem.this);
}
final TheChunkSystem.ExecutorGroup.ThreadPoolExecutor ret = new TheChunkSystem.ExecutorGroup.ThreadPoolExecutor();
this.executors.add(ret);
return ret;
}
}
public final class ThreadPoolExecutor implements PrioritisedExecutor {
private final ChunkSystemTaskQueue taskBuilder = new ChunkSystemTaskQueue(TheChunkSystem.this);
private volatile boolean halt;
private ThreadPoolExecutor() { }
private TheChunkSystem.ExecutorGroup getGroup() {
return TheChunkSystem.ExecutorGroup.this;
}
private void notifyPriorityShift() {
TheChunkSystem.this.notifyAllThreads();
}
private void notifyScheduled() {
TheChunkSystem.this.notifyAllThreads();
}
/**
* Removes this queue from the thread pool without shutting the queue down or waiting for queued tasks to be executed
*/
public void halt() {
this.halt = true;
TheChunkSystem.ExecutorGroup.this.executors.remove(this);
}
public boolean isActive() {
if (this.halt) {
return false;
} else {
if (!this.isShutdown()) {
return true;
}
return !TheChunkSystem.this.globalWorkQueue.isEmpty();
}
}
@Override
public boolean shutdown() {
if (TheChunkSystem.this.globalWorkQueue.isEmpty()) {
TheChunkSystem.ExecutorGroup.this.executors.remove(this);
}
return true;
}
@Override
public boolean isShutdown() {
return TheChunkSystem.this.shutdown;
}
@Override
public long getTotalTasksScheduled() {
return 0; // TODO: implement
}
@Override
public long getTotalTasksExecuted() {
return 0; // TODO: implement
}
@Override
public long generateNextSubOrder() {
return TheChunkSystem.ExecutorGroup.this.subOrderGenerator.getAndIncrement();
}
@Override
public boolean executeTask() {
throw new UnsupportedOperationException("Unable to execute task from ThreadPoolExecutor as interface into FlowSched");
}
@Override
public PrioritisedTask queueTask(final Runnable task) {
final PrioritisedTask ret = this.createTask(task);
ret.queue();
return ret;
}
@Override
public PrioritisedTask queueTask(final Runnable task, final Priority priority) {
final PrioritisedTask ret = this.createTask(task, priority);
ret.queue();
return ret;
}
@Override
public PrioritisedTask queueTask(final Runnable task, final Priority priority, final long subOrder) {
final PrioritisedTask ret = this.createTask(task, priority, subOrder);
ret.queue();
return ret;
}
@Override
public PrioritisedTask createTask(final Runnable task) {
return this.createTask(task, Priority.NORMAL);
}
@Override
public PrioritisedTask createTask(final Runnable task, final Priority priority) {
return this.createTask(task, priority, this.generateNextSubOrder());
}
@Override
public PrioritisedTask createTask(final Runnable task, final Priority priority, final long subOrder) {
return new TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.WrappedTask(this.taskBuilder.createTask(task, priority, subOrder));
}
// Decorator around a FlowSched PrioritisedTask that reports scheduling
// events (newly queued work, priority raises) back to the enclosing
// ThreadPoolExecutor so idle workers can be woken. All other operations
// delegate straight to the wrapped task.
private final class WrappedTask implements PrioritisedTask {
// The underlying FlowSched task; all state lives there.
private final PrioritisedTask wrapped;
private WrappedTask(final PrioritisedTask wrapped) {
this.wrapped = wrapped;
}
@Override
public PrioritisedExecutor getExecutor() {
return TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.this;
}
@Override
public boolean queue() {
// Queue the delegate; on success, wake the executor unless the task is
// already in the COMPLETING state (in which case there is nothing to run).
if (this.wrapped.queue()) {
final Priority priority = this.getPriority();
if (priority != Priority.COMPLETING) {
TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.this.notifyScheduled();
}
return true;
}
return false;
}
@Override
public boolean isQueued() {
return this.wrapped.isQueued();
}
@Override
public boolean cancel() {
return this.wrapped.cancel();
}
@Override
public boolean execute() {
return this.wrapped.execute();
}
@Override
public Priority getPriority() {
return this.wrapped.getPriority();
}
@Override
public boolean setPriority(final Priority priority) {
// A successful priority change may make this task more urgent, so the
// executor is notified to re-evaluate scheduling.
if (this.wrapped.setPriority(priority)) {
TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.this.notifyPriorityShift();
return true;
}
return false;
}
@Override
public boolean raisePriority(final Priority priority) {
if (this.wrapped.raisePriority(priority)) {
TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.this.notifyPriorityShift();
return true;
}
return false;
}
@Override
public boolean lowerPriority(final Priority priority) {
// No notification: lowering never makes a task newly urgent — presumably
// intentional; confirm against notifyPriorityShift()'s contract.
return this.wrapped.lowerPriority(priority);
}
@Override
public long getSubOrder() {
return this.wrapped.getSubOrder();
}
@Override
public boolean setSubOrder(final long subOrder) {
return this.wrapped.setSubOrder(subOrder);
}
@Override
public boolean raiseSubOrder(final long subOrder) {
return this.wrapped.raiseSubOrder(subOrder);
}
@Override
public boolean lowerSubOrder(final long subOrder) {
return this.wrapped.lowerSubOrder(subOrder);
}
@Override
public boolean setPriorityAndSubOrder(final Priority priority, final long subOrder) {
if (this.wrapped.setPriorityAndSubOrder(priority, subOrder)) {
TheChunkSystem.ExecutorGroup.ThreadPoolExecutor.this.notifyPriorityShift();
return true;
}
return false;
}
}
}
}
}

View File

@@ -1,64 +0,0 @@
package org.bxteam.divinemc.util;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import net.minecraft.world.level.levelgen.structure.structures.WoodlandMansionPieces;
public class ConcurrentFlagMatrix extends WoodlandMansionPieces.SimpleGrid {
private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
public ConcurrentFlagMatrix(int rows, int columns, int fallbackValue) {
super(rows, columns, fallbackValue);
}
public void set(int row, int column, int value) {
this.readWriteLock.writeLock().lock();
try {
super.set(row, column, value);
} finally {
this.readWriteLock.writeLock().unlock();
}
}
public void set(int startRow, int startColumn, int endRow, int endColumn, int value) {
this.readWriteLock.writeLock().lock();
try {
super.set(startRow, startColumn, endRow, endColumn, value);
} finally {
this.readWriteLock.writeLock().unlock();
}
}
public int get(int row, int column) {
this.readWriteLock.readLock().lock();
int result;
try {
result = super.get(row, column);
} finally {
this.readWriteLock.readLock().unlock();
}
return result;
}
public void setIf(int row, int column, int expectedValue, int newValue) {
if (this.get(row, column) == expectedValue) {
this.set(row, column, newValue);
}
}
public boolean edgesTo(int row, int column, int value) {
this.readWriteLock.readLock().lock();
boolean result;
try {
result = super.edgesTo(row, column, value);
} finally {
this.readWriteLock.readLock().unlock();
}
return result;
}
}

View File

@@ -1,40 +0,0 @@
package org.bxteam.divinemc.util;
import net.minecraft.util.Mth;
import net.minecraft.util.RandomSource;
import net.minecraft.world.level.levelgen.LegacyRandomSource;
import net.minecraft.world.level.levelgen.PositionalRandomFactory;
import net.minecraft.world.level.levelgen.SingleThreadedRandomSource;
import net.minecraft.world.level.levelgen.Xoroshiro128PlusPlus;
import net.minecraft.world.level.levelgen.XoroshiroRandomSource;
import org.jetbrains.annotations.NotNull;
// Helpers for reusing RandomSource instances during parallelized worldgen,
// avoiding per-position allocations. Relies on DivineMC patches exposing
// the internal seed fields of the vanilla random sources.
public final class RandomUtil {
// Returns a placeholder RandomSource matching the deriver's implementation.
// The 0 seeds are dummies — the instance is expected to be re-seeded via
// derive() before use (TODO confirm all callers do so).
public static @NotNull RandomSource getRandom(PositionalRandomFactory deriver) {
if (deriver instanceof XoroshiroRandomSource.XoroshiroPositionalRandomFactory) {
return new XoroshiroRandomSource(0L, 0L);
}
if (deriver instanceof LegacyRandomSource.LegacyPositionalRandomFactory) {
return new SingleThreadedRandomSource(0L);
}
throw new IllegalArgumentException();
}
// NOTE(review): these thread-locals are private and unreferenced within this
// class — verify they are actually used (e.g. via an access widener/patch)
// or remove them.
private static final ThreadLocal<XoroshiroRandomSource> xoroshiro = ThreadLocal.withInitial(() -> new XoroshiroRandomSource(0L, 0L));
private static final ThreadLocal<SingleThreadedRandomSource> simple = ThreadLocal.withInitial(() -> new SingleThreadedRandomSource(0L));
// Re-seeds `random` in place for block position (x, y, z), mirroring
// PositionalRandomFactory.at(x, y, z) without allocating a new source.
// NOTE(review): the xoroshiro branch writes seedLo/seedHi directly, bypassing
// the normal constructor path — confirm this matches vanilla's seed
// derivation exactly (including any all-zero-seed special casing).
public static void derive(PositionalRandomFactory deriver, RandomSource random, int x, int y, int z) {
if (deriver instanceof final XoroshiroRandomSource.XoroshiroPositionalRandomFactory deriver1) {
final Xoroshiro128PlusPlus implementation = ((XoroshiroRandomSource) random).randomNumberGenerator;
implementation.seedLo = (Mth.getSeed(x, y, z) ^ deriver1.seedLo());
implementation.seedHi = (deriver1.seedHi());
return;
}
if (deriver instanceof LegacyRandomSource.LegacyPositionalRandomFactory(long seed)) {
final SingleThreadedRandomSource random1 = (SingleThreadedRandomSource) random;
random1.setSeed(Mth.getSeed(x, y, z) ^ seed);
return;
}
throw new IllegalArgumentException();
}
}

View File

@@ -1,40 +0,0 @@
package org.bxteam.divinemc.util;
import com.mojang.datafixers.util.Pair;
import com.mojang.serialization.Codec;
import com.mojang.serialization.DataResult;
import com.mojang.serialization.DynamicOps;
import java.util.concurrent.locks.ReentrantLock;
/**
 * A {@link Codec} decorator that serializes access to a non-thread-safe
 * delegate behind a single non-fair {@link ReentrantLock}, so the delegate
 * may be shared across worldgen threads.
 */
public class SynchronizedCodec<A> implements Codec<A> {
    private final ReentrantLock lock = new ReentrantLock(false);
    private final Codec<A> delegate;

    public SynchronizedCodec(Codec<A> delegate) {
        this.delegate = delegate;
    }

    @Override
    public <T> DataResult<Pair<A, T>> decode(DynamicOps<T> ops, T input) {
        try {
            lock.lockInterruptibly();
            return this.delegate.decode(ops, input);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status before rethrowing so pools and
            // callers upstream still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } finally {
            // The lock is only held if lockInterruptibly() succeeded.
            if (lock.isHeldByCurrentThread()) lock.unlock();
        }
    }

    @Override
    public <T> DataResult<T> encode(A input, DynamicOps<T> ops, T prefix) {
        try {
            lock.lockInterruptibly();
            return this.delegate.encode(input, ops, prefix);
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status before rethrowing (see decode).
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } finally {
            if (lock.isHeldByCurrentThread()) lock.unlock();
        }
    }
}

View File

@@ -1,12 +0,0 @@
package org.bxteam.divinemc.util;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
// Thread-configuration callback: implementers receive each newly created
// Thread via Consumer#accept — presumably to set its name, daemon flag,
// priority, etc. (confirm at call sites).
public interface ThreadBuilder extends Consumer<Thread> {
// Interface fields are implicitly public static final, so this counter is
// shared across ALL implementations — ids are globally unique, not per-builder.
AtomicInteger id = new AtomicInteger();
// Returns the next unique worker id from the shared counter.
default int getAndIncrementId() {
return id.getAndIncrement();
}
}