mirror of
https://github.com/LeavesMC/Leaves.git
synced 2025-12-21 07:49:35 +00:00
996 lines
48 KiB
Diff
996 lines
48 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
|
From: violetc <58360096+s-yh-china@users.noreply.github.com>
|
|
Date: Sun, 14 Jan 2024 22:22:57 +0800
|
|
Subject: [PATCH] Linear region file format
|
|
|
|
This patch is Powered by LinearPurpur(https://github.com/StupidCraft/LinearPurpur)
|
|
|
|
diff --git a/build.gradle.kts b/build.gradle.kts
|
|
index 3f1316110dd00ae51e2bde8bd87e3b582587b92c..32977ad358c48cc17b0c56f516f74c42336eb38a 100644
|
|
--- a/build.gradle.kts
|
|
+++ b/build.gradle.kts
|
|
@@ -36,6 +36,10 @@ dependencies {
|
|
alsoShade(log4jPlugins.output)
|
|
implementation("io.netty:netty-codec-haproxy:4.1.97.Final") // Paper - Add support for proxy protocol
|
|
// Paper end
|
|
+ // Leaves start - Linear format
|
|
+ implementation("com.github.luben:zstd-jni:1.5.5-11")
|
|
+ implementation("org.lz4:lz4-java:1.8.0")
|
|
+ // Leaves end - Linear format
|
|
implementation("org.apache.logging.log4j:log4j-iostreams:2.19.0") // Paper - remove exclusion
|
|
implementation("org.ow2.asm:asm-commons:9.5")
|
|
implementation("org.spongepowered:configurate-yaml:4.2.0-SNAPSHOT") // Paper - config files
|
|
diff --git a/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
|
|
index f2c27e0ac65be4b75c1d86ef6fd45fdb538d96ac..036da95df5280e547e60d3b4641dad63ec15c224 100644
|
|
--- a/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
|
|
+++ b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
|
|
@@ -314,8 +314,8 @@ public final class PaperFileIOThread extends QueueExecutorThread {
|
|
public abstract void writeData(final int x, final int z, final CompoundTag compound) throws IOException;
|
|
public abstract CompoundTag readData(final int x, final int z) throws IOException;
|
|
|
|
- public abstract <T> T computeForRegionFile(final int chunkX, final int chunkZ, final Function<RegionFile, T> function);
|
|
- public abstract <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function);
|
|
+ public abstract <T> T computeForRegionFile(final int chunkX, final int chunkZ, final Function<top.leavesmc.leaves.region.AbstractRegionFile, T> function); // Leaves
|
|
+ public abstract <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<top.leavesmc.leaves.region.AbstractRegionFile, T> function); // Leaves
|
|
|
|
public static final class InProgressWrite {
|
|
public long writeCounter;
|
|
diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
|
|
index 8a11e10b01fa012b2f98b1c193c53251e848f909..17057486c031708d3aab82a01031cfef426076da 100644
|
|
--- a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
|
|
+++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
|
|
@@ -811,7 +811,7 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
final ChunkDataController taskController) {
|
|
final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
|
|
if (intendingToBlock) {
|
|
- return taskController.computeForRegionFile(chunkX, chunkZ, true, (final RegionFile file) -> {
|
|
+ return taskController.computeForRegionFile(chunkX, chunkZ, true, (final top.leavesmc.leaves.region.AbstractRegionFile file) -> { // Leaves
|
|
if (file == null) { // null if no regionfile exists
|
|
return Boolean.FALSE;
|
|
}
|
|
@@ -824,7 +824,7 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return Boolean.FALSE;
|
|
} // else: it either exists or is not known, fall back to checking the loaded region file
|
|
|
|
- return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> {
|
|
+ return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final top.leavesmc.leaves.region.AbstractRegionFile file) -> { // Leaves
|
|
if (file == null) { // null if not loaded
|
|
// not sure at this point, let the I/O thread figure it out
|
|
return Boolean.TRUE;
|
|
@@ -1126,9 +1126,9 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return this.getCache().doesRegionFileNotExistNoIO(new ChunkPos(chunkX, chunkZ));
|
|
}
|
|
|
|
- public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<RegionFile, T> function) {
|
|
+ public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<top.leavesmc.leaves.region.AbstractRegionFile, T> function) { // Leaves
|
|
final RegionFileStorage cache = this.getCache();
|
|
- final RegionFile regionFile;
|
|
+ final top.leavesmc.leaves.region.AbstractRegionFile regionFile; // Leaves
|
|
synchronized (cache) {
|
|
try {
|
|
regionFile = cache.getRegionFile(new ChunkPos(chunkX, chunkZ), existingOnly, true);
|
|
@@ -1141,19 +1141,19 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return function.apply(regionFile);
|
|
} finally {
|
|
if (regionFile != null) {
|
|
- regionFile.fileLock.unlock();
|
|
+ regionFile.getFileLock().unlock(); // Leaves
|
|
}
|
|
}
|
|
}
|
|
|
|
- public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function) {
|
|
+ public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<top.leavesmc.leaves.region.AbstractRegionFile, T> function) { // Leaves
|
|
final RegionFileStorage cache = this.getCache();
|
|
- final RegionFile regionFile;
|
|
+ final top.leavesmc.leaves.region.AbstractRegionFile regionFile; // Leaves
|
|
|
|
synchronized (cache) {
|
|
regionFile = cache.getRegionFileIfLoaded(new ChunkPos(chunkX, chunkZ));
|
|
if (regionFile != null) {
|
|
- regionFile.fileLock.lock();
|
|
+ regionFile.getFileLock().lock(); // Leaves
|
|
}
|
|
}
|
|
|
|
@@ -1161,7 +1161,7 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return function.apply(regionFile);
|
|
} finally {
|
|
if (regionFile != null) {
|
|
- regionFile.fileLock.unlock();
|
|
+ regionFile.getFileLock().unlock(); // Leaves
|
|
}
|
|
}
|
|
}
|
|
diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
index 5dea3d1a33f107959562d64493baffc7dc6dfdd3..983a2cd560921b41bd393bd11b15f0735144d6d3 100644
|
|
--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
+++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
@@ -868,13 +868,13 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
|
|
// Paper start - chunk status cache "api"
|
|
public ChunkStatus getChunkStatusOnDiskIfCached(ChunkPos chunkPos) {
|
|
- net.minecraft.world.level.chunk.storage.RegionFile regionFile = regionFileCache.getRegionFileIfLoaded(chunkPos);
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionFile = regionFileCache.getRegionFileIfLoaded(chunkPos); // Leaves
|
|
|
|
return regionFile == null ? null : regionFile.getStatusIfCached(chunkPos.x, chunkPos.z);
|
|
}
|
|
|
|
public ChunkStatus getChunkStatusOnDisk(ChunkPos chunkPos) throws IOException {
|
|
- net.minecraft.world.level.chunk.storage.RegionFile regionFile = regionFileCache.getRegionFile(chunkPos, true);
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionFile = regionFileCache.getRegionFile(chunkPos, true); // Leaves
|
|
|
|
if (regionFile == null || !regionFileCache.chunkExists(chunkPos)) {
|
|
return null;
|
|
@@ -892,7 +892,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
}
|
|
|
|
public void updateChunkStatusOnDisk(ChunkPos chunkPos, @Nullable CompoundTag compound) throws IOException {
|
|
- net.minecraft.world.level.chunk.storage.RegionFile regionFile = regionFileCache.getRegionFile(chunkPos, false);
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionFile = regionFileCache.getRegionFile(chunkPos, false); // Leaves
|
|
|
|
regionFile.setStatus(chunkPos.x, chunkPos.z, ChunkSerializer.getStatus(compound));
|
|
}
|
|
diff --git a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
index f2a7cb6ebed7a4b4019a09af2a025f624f6fe9c9..c54b88834981d3a2a23c862cc54733b2dcd3a654 100644
|
|
--- a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
+++ b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
@@ -61,7 +61,7 @@ public class WorldUpgrader {
|
|
private volatile int skipped;
|
|
private final Reference2FloatMap<ResourceKey<Level>> progressMap = Reference2FloatMaps.synchronize(new Reference2FloatOpenHashMap());
|
|
private volatile Component status = Component.translatable("optimizeWorld.stage.counting");
|
|
- public static final Pattern REGEX = Pattern.compile("^r\\.(-?[0-9]+)\\.(-?[0-9]+)\\.mca$");
|
|
+ public static final Pattern REGEX = Pattern.compile("^r\\.(-?[0-9]+)\\.(-?[0-9]+)\\.(linear|mca)$"); // Leaves
|
|
private final DimensionDataStorage overworldDataStorage;
|
|
|
|
public WorldUpgrader(LevelStorageSource.LevelStorageAccess session, DataFixer dataFixer, Registry<LevelStem> dimensionOptionsRegistry, boolean eraseCache) {
|
|
@@ -235,7 +235,7 @@ public class WorldUpgrader {
|
|
File file = this.levelStorage.getDimensionPath(world).toFile();
|
|
File file1 = new File(file, "region");
|
|
File[] afile = file1.listFiles((file2, s) -> {
|
|
- return s.endsWith(".mca");
|
|
+ return s.endsWith(".mca") || s.endsWith(".linear"); // Leaves
|
|
});
|
|
|
|
if (afile == null) {
|
|
@@ -254,7 +254,10 @@ public class WorldUpgrader {
|
|
int l = Integer.parseInt(matcher.group(2)) << 5;
|
|
|
|
try {
|
|
- RegionFile regionfile = new RegionFile(file2.toPath(), file1.toPath(), true);
|
|
+ // Leaves start
|
|
+ int linearCompression = top.leavesmc.leaves.LeavesConfig.linearCompressionLevel;
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = top.leavesmc.leaves.region.AbstractRegionFileFactory.getAbstractRegionFile(linearCompression, file2.toPath(), file1.toPath(), true);
|
|
+ // Leaves end
|
|
|
|
try {
|
|
for (int i1 = 0; i1 < 32; ++i1) {
|
|
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
|
index c50d317d63f5ce61788abf449ec59a542b021f50..20b347d80757a702b630d816bbac55a1bea68512 100644
|
|
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
|
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
|
@@ -26,7 +26,7 @@ import net.minecraft.nbt.NbtIo; // Paper
|
|
import net.minecraft.world.level.ChunkPos;
|
|
import org.slf4j.Logger;
|
|
|
|
-public class RegionFile implements AutoCloseable {
|
|
+public class RegionFile implements AutoCloseable, top.leavesmc.leaves.region.AbstractRegionFile { // Leaves
|
|
|
|
private static final Logger LOGGER = LogUtils.getLogger();
|
|
private static final int SECTOR_BYTES = 4096;
|
|
@@ -50,6 +50,16 @@ public class RegionFile implements AutoCloseable {
|
|
public final java.util.concurrent.locks.ReentrantLock fileLock = new java.util.concurrent.locks.ReentrantLock(); // Paper
|
|
public final Path regionFile; // Paper
|
|
|
|
+ // Leaves start - Abstract getters
|
|
+ public Path getRegionFile() {
|
|
+ return this.regionFile;
|
|
+ }
|
|
+
|
|
+ public java.util.concurrent.locks.ReentrantLock getFileLock() {
|
|
+ return this.fileLock;
|
|
+ }
|
|
+ // Leaves end
|
|
+
|
|
// Paper start - try to recover from RegionFile header corruption
|
|
private static long roundToSectors(long bytes) {
|
|
long sectors = bytes >>> 12; // 4096 = 2^12
|
|
@@ -128,7 +138,7 @@ public class RegionFile implements AutoCloseable {
|
|
}
|
|
|
|
// note: only call for CHUNK regionfiles
|
|
- boolean recalculateHeader() throws IOException {
|
|
+ public boolean recalculateHeader() throws IOException { // Leaves
|
|
if (!this.canRecalcHeader) {
|
|
return false;
|
|
}
|
|
@@ -954,10 +964,10 @@ public class RegionFile implements AutoCloseable {
|
|
private static int getChunkIndex(int x, int z) {
|
|
return (x & 31) + (z & 31) * 32;
|
|
}
|
|
- synchronized boolean isOversized(int x, int z) {
|
|
+ public synchronized boolean isOversized(int x, int z) { // Leaves
|
|
return this.oversized[getChunkIndex(x, z)] == 1;
|
|
}
|
|
- synchronized void setOversized(int x, int z, boolean oversized) throws IOException {
|
|
+ public synchronized void setOversized(int x, int z, boolean oversized) throws IOException { // Leaves
|
|
final int offset = getChunkIndex(x, z);
|
|
boolean previous = this.oversized[offset] == 1;
|
|
this.oversized[offset] = (byte) (oversized ? 1 : 0);
|
|
@@ -996,7 +1006,7 @@ public class RegionFile implements AutoCloseable {
|
|
return this.regionFile.getParent().resolve(this.regionFile.getFileName().toString().replaceAll("\\.mca$", "") + "_oversized_" + x + "_" + z + ".nbt");
|
|
}
|
|
|
|
- synchronized CompoundTag getOversizedData(int x, int z) throws IOException {
|
|
+ public synchronized CompoundTag getOversizedData(int x, int z) throws IOException { // Leaves
|
|
Path file = getOversizedFile(x, z);
|
|
try (DataInputStream out = new DataInputStream(new java.io.BufferedInputStream(new InflaterInputStream(Files.newInputStream(file))))) {
|
|
return NbtIo.read((java.io.DataInput) out);
|
|
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
|
index 6bec4549fbcfb68a053300451e25babf8ff38e99..b25f674e68714b11a9ca2a231f470082392641c8 100644
|
|
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
|
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
|
@@ -21,9 +21,14 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
public static final String ANVIL_EXTENSION = ".mca";
|
|
private static final int MAX_CACHE_SIZE = 256;
|
|
- public final Long2ObjectLinkedOpenHashMap<RegionFile> regionCache = new Long2ObjectLinkedOpenHashMap();
|
|
+ public final Long2ObjectLinkedOpenHashMap<top.leavesmc.leaves.region.AbstractRegionFile> regionCache = new Long2ObjectLinkedOpenHashMap(); // Leaves
|
|
private final Path folder;
|
|
private final boolean sync;
|
|
+ // Leaves start - region format
|
|
+ public final top.leavesmc.leaves.region.RegionFileFormat format;
|
|
+ public final int linearCompression;
|
|
+ public final boolean linearCrashOnBrokenSymlink;
|
|
+ // Leaves end
|
|
private final boolean isChunkData; // Paper
|
|
|
|
// Paper start - cache regionfile does not exist state
|
|
@@ -60,6 +65,11 @@ public class RegionFileStorage implements AutoCloseable {
|
|
this(directory, dsync, false);
|
|
}
|
|
RegionFileStorage(Path directory, boolean dsync, boolean isChunkData) {
|
|
+ // Leaves start
|
|
+ this.format = top.leavesmc.leaves.LeavesConfig.regionFormatName;
|
|
+ this.linearCompression = top.leavesmc.leaves.LeavesConfig.linearCompressionLevel;
|
|
+ this.linearCrashOnBrokenSymlink = top.leavesmc.leaves.LeavesConfig.linearCrashOnBrokenSymlink;
|
|
+ // Leaves end
|
|
this.isChunkData = isChunkData;
|
|
// Paper end - add isChunkData param
|
|
this.folder = directory;
|
|
@@ -70,7 +80,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
@Nullable
|
|
public static ChunkPos getRegionFileCoordinates(Path file) {
|
|
String fileName = file.getFileName().toString();
|
|
- if (!fileName.startsWith("r.") || !fileName.endsWith(".mca")) {
|
|
+ if (!fileName.startsWith("r.") || (!fileName.endsWith(".mca") && !fileName.endsWith(".linear"))) { // Leaves
|
|
return null;
|
|
}
|
|
|
|
@@ -90,29 +100,49 @@ public class RegionFileStorage implements AutoCloseable {
|
|
}
|
|
}
|
|
|
|
- public synchronized RegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) {
|
|
+ public synchronized top.leavesmc.leaves.region.AbstractRegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) { // Leaves
|
|
return this.regionCache.getAndMoveToFirst(ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()));
|
|
}
|
|
|
|
public synchronized boolean chunkExists(ChunkPos pos) throws IOException {
|
|
- RegionFile regionfile = getRegionFile(pos, true);
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = getRegionFile(pos, true); // Leaves
|
|
|
|
return regionfile != null ? regionfile.hasChunk(pos) : false;
|
|
}
|
|
|
|
- public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit
|
|
+ // Leaves start
|
|
+ private void guardAgainstBrokenSymlinks(Path path) throws IOException {
|
|
+ if (!linearCrashOnBrokenSymlink) {
|
|
+ return;
|
|
+ }
|
|
+ if (this.format != top.leavesmc.leaves.region.RegionFileFormat.LINEAR) {
|
|
+ return;
|
|
+ }
|
|
+ if (!java.nio.file.Files.isSymbolicLink(path)) {
|
|
+ return;
|
|
+ }
|
|
+ Path link = java.nio.file.Files.readSymbolicLink(path);
|
|
+ if (!java.nio.file.Files.exists(link) || !java.nio.file.Files.isReadable(link)) {
|
|
+ top.leavesmc.leaves.LeavesLogger.LOGGER.log(java.util.logging.Level.SEVERE, "Linear region file " + path + " is a broken symbolic link, crashing to prevent data loss");
|
|
+ net.minecraft.server.MinecraftServer.getServer().halt(false);
|
|
+ throw new IOException("Linear region file " + path + " is a broken symbolic link, crashing to prevent data loss");
|
|
+ }
|
|
+ }
|
|
+ // Leaves end
|
|
+
|
|
+ public synchronized top.leavesmc.leaves.region.AbstractRegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit // Leaves
|
|
return this.getRegionFile(chunkcoordintpair, existingOnly, false);
|
|
}
|
|
- public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException {
|
|
+ public synchronized top.leavesmc.leaves.region.AbstractRegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException { // Leaves
|
|
// Paper end
|
|
long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()); final long regionPos = i; // Paper - OBFHELPER
|
|
- RegionFile regionfile = (RegionFile) this.regionCache.getAndMoveToFirst(i);
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = this.regionCache.getAndMoveToFirst(i); // Leaves
|
|
|
|
if (regionfile != null) {
|
|
// Paper start
|
|
if (lock) {
|
|
// must be in this synchronized block
|
|
- regionfile.fileLock.lock();
|
|
+ regionfile.getFileLock().lock(); // Leaves
|
|
}
|
|
// Paper end
|
|
return regionfile;
|
|
@@ -123,28 +153,46 @@ public class RegionFileStorage implements AutoCloseable {
|
|
}
|
|
// Paper end - cache regionfile does not exist state
|
|
if (this.regionCache.size() >= io.papermc.paper.configuration.GlobalConfiguration.get().misc.regionFileCacheSize) { // Paper - configurable
|
|
- ((RegionFile) this.regionCache.removeLast()).close();
|
|
+ this.regionCache.removeLast().close(); // Leaves
|
|
}
|
|
|
|
// Paper - only create directory if not existing only - moved down
|
|
Path path = this.folder;
|
|
int j = chunkcoordintpair.getRegionX();
|
|
- Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca"); // Paper - diff on change
|
|
- if (existingOnly && !java.nio.file.Files.exists(path1)) { // Paper start - cache regionfile does not exist state
|
|
- this.markNonExisting(regionPos);
|
|
- return null; // CraftBukkit
|
|
+ // Leaves start - Polyglot
|
|
+ Path path1;
|
|
+ if (existingOnly) {
|
|
+ Path anvil = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca");
|
|
+ Path linear = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".linear");
|
|
+ guardAgainstBrokenSymlinks(linear);
|
|
+ if (java.nio.file.Files.exists(anvil)) {
|
|
+ path1 = anvil;
|
|
+ } else if (java.nio.file.Files.exists(linear)) {
|
|
+ path1 = linear;
|
|
+ } else {
|
|
+ this.markNonExisting(regionPos);
|
|
+ return null;
|
|
+ }
|
|
+ // Leaves end
|
|
} else {
|
|
+ // Leaves start - Polyglot
|
|
+ String extension = switch (this.format) {
|
|
+ case LINEAR -> "linear";
|
|
+ default -> "mca";
|
|
+ };
|
|
+ path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + "." + extension);
|
|
+ // Leaves end
|
|
+ guardAgainstBrokenSymlinks(path1); // Leaves - Crash on broken symlink
|
|
this.createRegionFile(regionPos);
|
|
}
|
|
// Paper end - cache regionfile does not exist state
|
|
FileUtil.createDirectoriesSafe(this.folder); // Paper - only create directory if not existing only - moved from above
|
|
- RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header
|
|
-
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile1 = top.leavesmc.leaves.region.AbstractRegionFileFactory.getAbstractRegionFile(this.linearCompression, path1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header // Leaves
|
|
this.regionCache.putAndMoveToFirst(i, regionfile1);
|
|
// Paper start
|
|
if (lock) {
|
|
// must be in this synchronized block
|
|
- regionfile1.fileLock.lock();
|
|
+ regionfile1.getFileLock().lock(); // Leaves
|
|
}
|
|
// Paper end
|
|
return regionfile1;
|
|
@@ -172,7 +220,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
}
|
|
|
|
|
|
- private static CompoundTag readOversizedChunk(RegionFile regionfile, ChunkPos chunkCoordinate) throws IOException {
|
|
+ private static CompoundTag readOversizedChunk(top.leavesmc.leaves.region.AbstractRegionFile regionfile, ChunkPos chunkCoordinate) throws IOException { // Leaves
|
|
synchronized (regionfile) {
|
|
try (DataInputStream datainputstream = regionfile.getChunkDataInputStream(chunkCoordinate)) {
|
|
CompoundTag oversizedData = regionfile.getOversizedData(chunkCoordinate.x, chunkCoordinate.z);
|
|
@@ -219,14 +267,14 @@ public class RegionFileStorage implements AutoCloseable {
|
|
@Nullable
|
|
public CompoundTag read(ChunkPos pos) throws IOException {
|
|
// CraftBukkit start - SPIGOT-5680: There's no good reason to preemptively create files on read, save that for writing
|
|
- RegionFile regionfile = this.getRegionFile(pos, true, true); // Paper
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = this.getRegionFile(pos, true, true); // Paper // Leaves
|
|
if (regionfile == null) {
|
|
return null;
|
|
}
|
|
// Paper start - Add regionfile parameter
|
|
return this.read(pos, regionfile);
|
|
}
|
|
- public CompoundTag read(ChunkPos pos, RegionFile regionfile) throws IOException {
|
|
+ public CompoundTag read(ChunkPos pos, top.leavesmc.leaves.region.AbstractRegionFile regionfile) throws IOException { // Leaves
|
|
// We add the regionfile parameter to avoid the potential deadlock (on fileLock) if we went back to obtain a regionfile
|
|
// if we decide to re-read
|
|
// Paper end
|
|
@@ -236,7 +284,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
// Paper start
|
|
if (regionfile.isOversized(pos.x, pos.z)) {
|
|
- printOversizedLog("Loading Oversized Chunk!", regionfile.regionFile, pos.x, pos.z);
|
|
+ printOversizedLog("Loading Oversized Chunk!", regionfile.getRegionFile(), pos.x, pos.z); // Leaves
|
|
return readOversizedChunk(regionfile, pos);
|
|
}
|
|
// Paper end
|
|
@@ -250,12 +298,12 @@ public class RegionFileStorage implements AutoCloseable {
|
|
if (this.isChunkData) {
|
|
ChunkPos chunkPos = ChunkSerializer.getChunkCoordinate(nbttagcompound);
|
|
if (!chunkPos.equals(pos)) {
|
|
- net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos + " but got chunk data for " + chunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionfile.regionFile.toAbsolutePath());
|
|
+ net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos + " but got chunk data for " + chunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionfile.getRegionFile().toAbsolutePath()); // Leaves
|
|
if (regionfile.recalculateHeader()) {
|
|
- regionfile.fileLock.lock(); // otherwise we will unlock twice and only lock once.
|
|
+ regionfile.getFileLock().lock(); // otherwise we will unlock twice and only lock once. // Leaves
|
|
return this.read(pos, regionfile);
|
|
}
|
|
- net.minecraft.server.MinecraftServer.LOGGER.error("Can't recalculate regionfile header, regenerating chunk " + pos + " for " + regionfile.regionFile.toAbsolutePath());
|
|
+ net.minecraft.server.MinecraftServer.LOGGER.error("Can't recalculate regionfile header, regenerating chunk " + pos + " for " + regionfile.getRegionFile().toAbsolutePath()); // Leaves
|
|
return null;
|
|
}
|
|
}
|
|
@@ -289,13 +337,13 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
return nbttagcompound;
|
|
} finally { // Paper start
|
|
- regionfile.fileLock.unlock();
|
|
+ regionfile.getFileLock().unlock(); // Leaves
|
|
} // Paper end
|
|
}
|
|
|
|
public void scanChunk(ChunkPos chunkPos, StreamTagVisitor scanner) throws IOException {
|
|
// CraftBukkit start - SPIGOT-5680: There's no good reason to preemptively create files on read, save that for writing
|
|
- RegionFile regionfile = this.getRegionFile(chunkPos, true);
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = this.getRegionFile(chunkPos, true); // Leaves
|
|
if (regionfile == null) {
|
|
return;
|
|
}
|
|
@@ -325,7 +373,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
}
|
|
|
|
protected void write(ChunkPos pos, @Nullable CompoundTag nbt) throws IOException {
|
|
- RegionFile regionfile = this.getRegionFile(pos, nbt == null, true); // CraftBukkit // Paper // Paper start - rewrite chunk system
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = this.getRegionFile(pos, nbt == null, true); // CraftBukkit // Paper // Paper start - rewrite chunk system // Leaves
|
|
if (nbt == null && regionfile == null) {
|
|
return;
|
|
}
|
|
@@ -375,7 +423,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
}
|
|
// Paper end
|
|
} finally { // Paper start
|
|
- regionfile.fileLock.unlock();
|
|
+ regionfile.getFileLock().unlock(); // Leaves
|
|
} // Paper end
|
|
}
|
|
|
|
@@ -384,7 +432,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
ObjectIterator objectiterator = this.regionCache.values().iterator();
|
|
|
|
while (objectiterator.hasNext()) {
|
|
- RegionFile regionfile = (RegionFile) objectiterator.next();
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = (top.leavesmc.leaves.region.AbstractRegionFile) objectiterator.next(); // Leaves
|
|
|
|
try {
|
|
regionfile.close();
|
|
@@ -400,7 +448,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
ObjectIterator objectiterator = this.regionCache.values().iterator();
|
|
|
|
while (objectiterator.hasNext()) {
|
|
- RegionFile regionfile = (RegionFile) objectiterator.next();
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile regionfile = (top.leavesmc.leaves.region.AbstractRegionFile) objectiterator.next(); // Leaves
|
|
|
|
regionfile.flush();
|
|
}
|
|
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
|
|
index 2916fb49c69daaa660fc7f53821e8be766226345..433bbdb42ba5a7b4454eca2895dbf9dbf01086d5 100644
|
|
--- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
|
|
+++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
|
|
@@ -567,7 +567,7 @@ public class CraftWorld extends CraftRegionAccessor implements World {
|
|
return true;
|
|
}
|
|
|
|
- net.minecraft.world.level.chunk.storage.RegionFile file;
|
|
+ top.leavesmc.leaves.region.AbstractRegionFile file; // Leaves
|
|
try {
|
|
file = world.getChunkSource().chunkMap.regionFileCache.getRegionFile(chunkPos, false);
|
|
} catch (java.io.IOException ex) {
|
|
diff --git a/src/main/java/top/leavesmc/leaves/region/AbstractRegionFile.java b/src/main/java/top/leavesmc/leaves/region/AbstractRegionFile.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..835b6d3726eda2573f616463d744cb5f6233e01c
|
|
--- /dev/null
|
|
+++ b/src/main/java/top/leavesmc/leaves/region/AbstractRegionFile.java
|
|
@@ -0,0 +1,45 @@
|
|
+package top.leavesmc.leaves.region;
|
|
+
|
|
+import net.minecraft.nbt.CompoundTag;
|
|
+import net.minecraft.world.level.ChunkPos;
|
|
+import net.minecraft.world.level.chunk.ChunkStatus;
|
|
+
|
|
+import java.io.DataInputStream;
|
|
+import java.io.DataOutputStream;
|
|
+import java.io.IOException;
|
|
+import java.nio.file.Path;
|
|
+import java.util.concurrent.locks.ReentrantLock;
|
|
+
|
|
+public interface AbstractRegionFile {
|
|
+
|
|
+ void flush() throws IOException;
|
|
+
|
|
+ void clear(ChunkPos pos) throws IOException;
|
|
+
|
|
+ void close() throws IOException;
|
|
+
|
|
+ void setStatus(int x, int z, ChunkStatus status);
|
|
+
|
|
+ void setOversized(int x, int z, boolean b) throws IOException;
|
|
+
|
|
+ boolean hasChunk(ChunkPos pos);
|
|
+
|
|
+ boolean doesChunkExist(ChunkPos pos) throws Exception;
|
|
+
|
|
+ boolean isOversized(int x, int z);
|
|
+
|
|
+ boolean recalculateHeader() throws IOException;
|
|
+
|
|
+ DataOutputStream getChunkDataOutputStream(ChunkPos pos) throws IOException;
|
|
+
|
|
+ DataInputStream getChunkDataInputStream(ChunkPos pos) throws IOException;
|
|
+
|
|
+ CompoundTag getOversizedData(int x, int z) throws IOException;
|
|
+
|
|
+ ChunkStatus getStatusIfCached(int x, int z);
|
|
+
|
|
+ ReentrantLock getFileLock();
|
|
+
|
|
+ Path getRegionFile();
|
|
+}
|
|
+
|
|
diff --git a/src/main/java/top/leavesmc/leaves/region/AbstractRegionFileFactory.java b/src/main/java/top/leavesmc/leaves/region/AbstractRegionFileFactory.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..980744f9b620a19fede7a6c7ce341bf5275a9b6a
|
|
--- /dev/null
|
|
+++ b/src/main/java/top/leavesmc/leaves/region/AbstractRegionFileFactory.java
|
|
@@ -0,0 +1,30 @@
|
|
+package top.leavesmc.leaves.region;
|
|
+
|
|
+import net.minecraft.world.level.chunk.storage.RegionFile;
|
|
+import net.minecraft.world.level.chunk.storage.RegionFileVersion;
|
|
+
|
|
+import java.io.IOException;
|
|
+import java.nio.file.Path;
|
|
+
|
|
+public class AbstractRegionFileFactory {
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, Path file, Path directory, boolean dsync) throws IOException {
|
|
+ return getAbstractRegionFile(linearCompression, file, directory, RegionFileVersion.VERSION_DEFLATE, dsync);
|
|
+ }
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, Path file, Path directory, boolean dsync, boolean canRecalcHeader) throws IOException {
|
|
+ return getAbstractRegionFile(linearCompression, file, directory, RegionFileVersion.VERSION_DEFLATE, dsync, canRecalcHeader);
|
|
+ }
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, Path file, Path directory, RegionFileVersion outputChunkStreamVersion, boolean dsync) throws IOException {
|
|
+ return getAbstractRegionFile(linearCompression, file, directory, outputChunkStreamVersion, dsync, false);
|
|
+ }
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, Path file, Path directory, RegionFileVersion outputChunkStreamVersion, boolean dsync, boolean canRecalcHeader) throws IOException {
|
|
+ if (file.toString().endsWith(".linear")) {
|
|
+ return new LinearRegionFile(file, linearCompression);
|
|
+ } else {
|
|
+ return new RegionFile(file, directory, outputChunkStreamVersion, dsync, canRecalcHeader);
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/top/leavesmc/leaves/region/LinearRegionFile.java b/src/main/java/top/leavesmc/leaves/region/LinearRegionFile.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..072495e6c0c08a3239faab0fb6ebb28451039694
|
|
--- /dev/null
|
|
+++ b/src/main/java/top/leavesmc/leaves/region/LinearRegionFile.java
|
|
@@ -0,0 +1,328 @@
|
|
+package top.leavesmc.leaves.region;
|
|
+
|
|
+import com.github.luben.zstd.ZstdInputStream;
|
|
+import com.github.luben.zstd.ZstdOutputStream;
|
|
+import com.mojang.logging.LogUtils;
|
|
+import net.jpountz.lz4.LZ4Compressor;
|
|
+import net.jpountz.lz4.LZ4Factory;
|
|
+import net.jpountz.lz4.LZ4FastDecompressor;
|
|
+import net.minecraft.nbt.CompoundTag;
|
|
+import net.minecraft.world.level.ChunkPos;
|
|
+import net.minecraft.world.level.chunk.ChunkStatus;
|
|
+import org.slf4j.Logger;
|
|
+
|
|
+import javax.annotation.Nullable;
|
|
+import java.io.*;
|
|
+import java.nio.ByteBuffer;
|
|
+import java.nio.file.Files;
|
|
+import java.nio.file.Path;
|
|
+import java.nio.file.StandardCopyOption;
|
|
+import java.util.ArrayList;
|
|
+import java.util.Arrays;
|
|
+import java.util.List;
|
|
+import java.util.concurrent.atomic.AtomicBoolean;
|
|
+import java.util.concurrent.locks.ReentrantLock;
|
|
+
|
|
+// Powered by LinearPurpur(https://github.com/StupidCraft/LinearPurpur)
|
|
+public class LinearRegionFile implements AbstractRegionFile, AutoCloseable {
|
|
+
|
|
+ private static final long SUPERBLOCK = -4323716122432332390L;
|
|
+ private static final byte VERSION = 2;
|
|
+ private static final int HEADER_SIZE = 32;
|
|
+ private static final int FOOTER_SIZE = 8;
|
|
+ private static final Logger LOGGER = LogUtils.getLogger();
|
|
+ private static final List<Byte> SUPPORTED_VERSIONS = Arrays.asList((byte) 1, (byte) 2);
|
|
+ private static final LinearRegionFileFlusher linearRegionFileFlusher = new LinearRegionFileFlusher();
|
|
+
|
|
+ private final byte[][] buffer = new byte[1024][];
|
|
+ private final int[] bufferUncompressedSize = new int[1024];
|
|
+
|
|
+ private final int[] chunkTimestamps = new int[1024];
|
|
+ private final ChunkStatus[] statuses = new ChunkStatus[1024];
|
|
+
|
|
+ private final LZ4Compressor compressor;
|
|
+ private final LZ4FastDecompressor decompressor;
|
|
+
|
|
+ public final ReentrantLock fileLock = new ReentrantLock(true);
|
|
+ private final int compressionLevel;
|
|
+
|
|
+ private final AtomicBoolean markedToSave = new AtomicBoolean(false);
|
|
+ public boolean closed = false;
|
|
+ public Path path;
|
|
+
|
|
+
|
|
+ public LinearRegionFile(Path file, int compression) throws IOException {
|
|
+ this.path = file;
|
|
+ this.compressionLevel = compression;
|
|
+ this.compressor = LZ4Factory.fastestInstance().fastCompressor();
|
|
+ this.decompressor = LZ4Factory.fastestInstance().fastDecompressor();
|
|
+
|
|
+ File regionFile = new File(this.path.toString());
|
|
+
|
|
+ Arrays.fill(this.bufferUncompressedSize, 0);
|
|
+
|
|
+ if (!regionFile.canRead()) return;
|
|
+
|
|
+ try (FileInputStream fileStream = new FileInputStream(regionFile);
|
|
+ DataInputStream rawDataStream = new DataInputStream(fileStream)) {
|
|
+
|
|
+ long superBlock = rawDataStream.readLong();
|
|
+ if (superBlock != SUPERBLOCK) {
|
|
+ throw new RuntimeException("Invalid superblock: " + superBlock + " in " + file);
|
|
+ }
|
|
+
|
|
+ byte version = rawDataStream.readByte();
|
|
+ if (!SUPPORTED_VERSIONS.contains(version)) {
|
|
+ throw new RuntimeException("Invalid version: " + version + " in " + file);
|
|
+ }
|
|
+
|
|
+ // Skip newestTimestamp (Long) + Compression level (Byte) + Chunk count (Short): Unused.
|
|
+ rawDataStream.skipBytes(11);
|
|
+
|
|
+ int dataCount = rawDataStream.readInt();
|
|
+ long fileLength = file.toFile().length();
|
|
+ if (fileLength != HEADER_SIZE + dataCount + FOOTER_SIZE) {
|
|
+ throw new IOException("Invalid file length: " + this.path + " " + fileLength + " " + (HEADER_SIZE + dataCount + FOOTER_SIZE));
|
|
+ }
|
|
+
|
|
+ rawDataStream.skipBytes(8); // Skip data hash (Long): Unused.
|
|
+
|
|
+ byte[] rawCompressed = new byte[dataCount];
|
|
+ rawDataStream.readFully(rawCompressed, 0, dataCount);
|
|
+
|
|
+ superBlock = rawDataStream.readLong();
|
|
+ if (superBlock != SUPERBLOCK) {
|
|
+ throw new IOException("Footer superblock invalid " + this.path);
|
|
+ }
|
|
+
|
|
+ try (DataInputStream dataStream = new DataInputStream(new ZstdInputStream(new ByteArrayInputStream(rawCompressed)))) {
|
|
+ int[] starts = new int[1024];
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ starts[i] = dataStream.readInt();
|
|
+ dataStream.skipBytes(4); // Skip timestamps (Int): Unused.
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ if (starts[i] > 0) {
|
|
+ int size = starts[i];
|
|
+ byte[] b = new byte[size];
|
|
+ dataStream.readFully(b, 0, size);
|
|
+
|
|
+ int maxCompressedLength = this.compressor.maxCompressedLength(size);
|
|
+ byte[] compressed = new byte[maxCompressedLength];
|
|
+ int compressedLength = this.compressor.compress(b, 0, size, compressed, 0, maxCompressedLength);
|
|
+ b = new byte[compressedLength];
|
|
+ System.arraycopy(compressed, 0, b, 0, compressedLength);
|
|
+
|
|
+ this.buffer[i] = b;
|
|
+ this.bufferUncompressedSize[i] = size;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public Path getRegionFile() {
|
|
+ return this.path;
|
|
+ }
|
|
+
|
|
+ public ReentrantLock getFileLock() {
|
|
+ return this.fileLock;
|
|
+ }
|
|
+
|
|
+ public void flush() throws IOException {
|
|
+ if (isMarkedToSave()) flushWrapper(); // sync
|
|
+ }
|
|
+
|
|
+ private void markToSave() {
|
|
+ linearRegionFileFlusher.scheduleSave(this);
|
|
+ markedToSave.set(true);
|
|
+ }
|
|
+
|
|
+ public boolean isMarkedToSave() {
|
|
+ return markedToSave.getAndSet(false);
|
|
+ }
|
|
+
|
|
+ public void flushWrapper() {
|
|
+ try {
|
|
+ save();
|
|
+ } catch (IOException e) {
|
|
+ LOGGER.error("Failed to flush region file " + path.toAbsolutePath(), e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public boolean doesChunkExist(ChunkPos pos) throws Exception {
|
|
+ throw new Exception("doesChunkExist is a stub");
|
|
+ }
|
|
+
|
|
+ private synchronized void save() throws IOException {
|
|
+ long timestamp = getTimestamp();
|
|
+ short chunkCount = 0;
|
|
+
|
|
+ File tempFile = new File(path.toString() + ".tmp");
|
|
+
|
|
+ try (FileOutputStream fileStream = new FileOutputStream(tempFile);
|
|
+ ByteArrayOutputStream zstdByteArray = new ByteArrayOutputStream();
|
|
+ ZstdOutputStream zstdStream = new ZstdOutputStream(zstdByteArray, this.compressionLevel);
|
|
+ DataOutputStream zstdDataStream = new DataOutputStream(zstdStream);
|
|
+ DataOutputStream dataStream = new DataOutputStream(fileStream)) {
|
|
+
|
|
+ dataStream.writeLong(SUPERBLOCK);
|
|
+ dataStream.writeByte(VERSION);
|
|
+ dataStream.writeLong(timestamp);
|
|
+ dataStream.writeByte(this.compressionLevel);
|
|
+
|
|
+ ArrayList<byte[]> byteBuffers = new ArrayList<>();
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ if (this.bufferUncompressedSize[i] != 0) {
|
|
+ chunkCount += 1;
|
|
+ byte[] content = new byte[bufferUncompressedSize[i]];
|
|
+ this.decompressor.decompress(buffer[i], 0, content, 0, bufferUncompressedSize[i]);
|
|
+
|
|
+ byteBuffers.add(content);
|
|
+ } else {
|
|
+ byteBuffers.add(null);
|
|
+ }
|
|
+ }
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ zstdDataStream.writeInt(this.bufferUncompressedSize[i]); // Write uncompressed size
|
|
+ zstdDataStream.writeInt(this.chunkTimestamps[i]); // Write timestamp
|
|
+ }
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ if (byteBuffers.get(i) != null) {
|
|
+ zstdDataStream.write(byteBuffers.get(i), 0, byteBuffers.get(i).length);
|
|
+ }
|
|
+ }
|
|
+ zstdDataStream.close();
|
|
+
|
|
+ dataStream.writeShort(chunkCount);
|
|
+
|
|
+ byte[] compressed = zstdByteArray.toByteArray();
|
|
+
|
|
+ dataStream.writeInt(compressed.length);
|
|
+ dataStream.writeLong(0);
|
|
+
|
|
+ dataStream.write(compressed, 0, compressed.length);
|
|
+ dataStream.writeLong(SUPERBLOCK);
|
|
+
|
|
+ dataStream.flush();
|
|
+ fileStream.getFD().sync();
|
|
+ fileStream.getChannel().force(true); // Ensure atomicity on Btrfs
|
|
+ }
|
|
+ Files.move(tempFile.toPath(), this.path, StandardCopyOption.REPLACE_EXISTING);
|
|
+ }
|
|
+
|
|
+
|
|
+ public void setStatus(int x, int z, ChunkStatus status) {
|
|
+ this.statuses[getChunkIndex(x, z)] = status;
|
|
+ }
|
|
+
|
|
+ public synchronized void write(ChunkPos pos, ByteBuffer buffer) {
|
|
+ try {
|
|
+ byte[] b = toByteArray(new ByteArrayInputStream(buffer.array()));
|
|
+ int uncompressedSize = b.length;
|
|
+
|
|
+ int maxCompressedLength = this.compressor.maxCompressedLength(b.length);
|
|
+ byte[] compressed = new byte[maxCompressedLength];
|
|
+ int compressedLength = this.compressor.compress(b, 0, b.length, compressed, 0, maxCompressedLength);
|
|
+ b = new byte[compressedLength];
|
|
+ System.arraycopy(compressed, 0, b, 0, compressedLength);
|
|
+
|
|
+ int index = getChunkIndex(pos.x, pos.z);
|
|
+ this.buffer[index] = b;
|
|
+ this.chunkTimestamps[index] = getTimestamp();
|
|
+ this.bufferUncompressedSize[getChunkIndex(pos.x, pos.z)] = uncompressedSize;
|
|
+ } catch (IOException e) {
|
|
+ LOGGER.error("Chunk write IOException " + e + " " + this.path);
|
|
+ }
|
|
+ markToSave();
|
|
+ }
|
|
+
|
|
+ public DataOutputStream getChunkDataOutputStream(ChunkPos pos) {
|
|
+ return new DataOutputStream(new BufferedOutputStream(new ChunkBuffer(pos)));
|
|
+ }
|
|
+
|
|
+ private class ChunkBuffer extends ByteArrayOutputStream {
|
|
+
|
|
+ private final ChunkPos pos;
|
|
+
|
|
+ public ChunkBuffer(ChunkPos chunkcoordintpair) {
|
|
+ super();
|
|
+ this.pos = chunkcoordintpair;
|
|
+ }
|
|
+
|
|
+ public void close() {
|
|
+ ByteBuffer bytebuffer = ByteBuffer.wrap(this.buf, 0, this.count);
|
|
+ LinearRegionFile.this.write(this.pos, bytebuffer);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private byte[] toByteArray(InputStream in) throws IOException {
|
|
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
|
|
+ byte[] tempBuffer = new byte[4096];
|
|
+
|
|
+ int length;
|
|
+ while ((length = in.read(tempBuffer)) >= 0) {
|
|
+ out.write(tempBuffer, 0, length);
|
|
+ }
|
|
+
|
|
+ return out.toByteArray();
|
|
+ }
|
|
+
|
|
+ @Nullable
|
|
+ public synchronized DataInputStream getChunkDataInputStream(ChunkPos pos) {
|
|
+ if (this.bufferUncompressedSize[getChunkIndex(pos.x, pos.z)] != 0) {
|
|
+ byte[] content = new byte[bufferUncompressedSize[getChunkIndex(pos.x, pos.z)]];
|
|
+ this.decompressor.decompress(this.buffer[getChunkIndex(pos.x, pos.z)], 0, content, 0, bufferUncompressedSize[getChunkIndex(pos.x, pos.z)]);
|
|
+ return new DataInputStream(new ByteArrayInputStream(content));
|
|
+ }
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ public ChunkStatus getStatusIfCached(int x, int z) {
|
|
+ return this.statuses[getChunkIndex(x, z)];
|
|
+ }
|
|
+
|
|
+ public void clear(ChunkPos pos) {
|
|
+ int i = getChunkIndex(pos.x, pos.z);
|
|
+ this.buffer[i] = null;
|
|
+ this.bufferUncompressedSize[i] = 0;
|
|
+ this.chunkTimestamps[i] = getTimestamp();
|
|
+ markToSave();
|
|
+ }
|
|
+
|
|
+ public boolean hasChunk(ChunkPos pos) {
|
|
+ return this.bufferUncompressedSize[getChunkIndex(pos.x, pos.z)] > 0;
|
|
+ }
|
|
+
|
|
+ public void close() throws IOException {
|
|
+ if (closed) {
|
|
+ return;
|
|
+ }
|
|
+ closed = true;
|
|
+ flush(); // sync
|
|
+ }
|
|
+
|
|
+ private static int getChunkIndex(int x, int z) {
|
|
+ return (x & 31) + ((z & 31) << 5);
|
|
+ }
|
|
+
|
|
+ private static int getTimestamp() {
|
|
+ return (int) (System.currentTimeMillis() / 1000L);
|
|
+ }
|
|
+
|
|
+ public boolean recalculateHeader() {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public void setOversized(int x, int z, boolean something) {
|
|
+ }
|
|
+
|
|
+ public CompoundTag getOversizedData(int x, int z) throws IOException {
|
|
+ throw new IOException("getOversizedData is a stub " + this.path);
|
|
+ }
|
|
+
|
|
+ public boolean isOversized(int x, int z) {
|
|
+ return false;
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/top/leavesmc/leaves/region/LinearRegionFileFlusher.java b/src/main/java/top/leavesmc/leaves/region/LinearRegionFileFlusher.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..f9f70c156ef3dc4e79acd4ce9643f310ac081008
|
|
--- /dev/null
|
|
+++ b/src/main/java/top/leavesmc/leaves/region/LinearRegionFileFlusher.java
|
|
@@ -0,0 +1,52 @@
|
|
+package top.leavesmc.leaves.region;
|
|
+
|
|
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
|
+
|
|
+import java.util.Queue;
|
|
+import java.util.concurrent.*;
|
|
+
|
|
+import org.bukkit.Bukkit;
|
|
+import top.leavesmc.leaves.LeavesConfig;
|
|
+
|
|
+// Powered by LinearPurpur(https://github.com/StupidCraft/LinearPurpur)
|
|
+public class LinearRegionFileFlusher {
|
|
+ private final Queue<LinearRegionFile> savingQueue = new LinkedBlockingQueue<>();
|
|
+ private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
|
|
+ new ThreadFactoryBuilder()
|
|
+ .setNameFormat("linear-flush-scheduler")
|
|
+ .build()
|
|
+ );
|
|
+ private final ExecutorService executor = Executors.newFixedThreadPool(
|
|
+ LeavesConfig.getLinearFlushThreads(),
|
|
+ new ThreadFactoryBuilder()
|
|
+ .setNameFormat("linear-flusher-%d")
|
|
+ .build()
|
|
+ );
|
|
+
|
|
+ public LinearRegionFileFlusher() {
|
|
+ Bukkit.getLogger().info("Using " + LeavesConfig.getLinearFlushThreads() + " threads for linear region flushing.");
|
|
+ scheduler.scheduleAtFixedRate(this::pollAndFlush, 0L, LeavesConfig.getLinearFlushThreads(), TimeUnit.SECONDS);
|
|
+ }
|
|
+
|
|
+ public void scheduleSave(LinearRegionFile regionFile) {
|
|
+ if (savingQueue.contains(regionFile)) {
|
|
+ return;
|
|
+ }
|
|
+ savingQueue.add(regionFile);
|
|
+ }
|
|
+
|
|
+ private void pollAndFlush() {
|
|
+ while (!savingQueue.isEmpty()) {
|
|
+ LinearRegionFile regionFile = savingQueue.poll();
|
|
+ if (!regionFile.closed && regionFile.isMarkedToSave()) {
|
|
+ executor.execute(regionFile::flushWrapper);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void shutdown() {
|
|
+ executor.shutdown();
|
|
+ scheduler.shutdown();
|
|
+ }
|
|
+}
|
|
+
|
|
diff --git a/src/main/java/top/leavesmc/leaves/region/RegionFileFormat.java b/src/main/java/top/leavesmc/leaves/region/RegionFileFormat.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..a9a8e79557fac57e29b64e7e55fc04b80ad11ae3
|
|
--- /dev/null
|
|
+++ b/src/main/java/top/leavesmc/leaves/region/RegionFileFormat.java
|
|
@@ -0,0 +1,14 @@
|
|
+package top.leavesmc.leaves.region;
|
|
+
|
|
+public enum RegionFileFormat {
|
|
+ ANVIL, LINEAR, INVALID;
|
|
+
|
|
+ public static RegionFileFormat fromString(String format) {
|
|
+ for (RegionFileFormat regionFileFormat : values()) {
|
|
+ if (regionFileFormat.name().equalsIgnoreCase(format)) {
|
|
+ return regionFileFormat;
|
|
+ }
|
|
+ }
|
|
+ return RegionFileFormat.INVALID;
|
|
+ }
|
|
+}
|