mirror of https://github.com/LeavesMC/Leaves.git, synced 2025-12-19 14:59:32 +00:00
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: violetc <58360096+s-yh-china@users.noreply.github.com>
Date: Sun, 14 Jan 2024 22:22:57 +0800
Subject: [PATCH] Linear region file format

This patch is powered by LinearPurpur (https://github.com/StupidCraft/LinearPurpur).

diff --git a/build.gradle.kts b/build.gradle.kts
|
|
index 28baa9a192a6fd83563b57a411e9bc905ba6b7e1..de18503a98afca9a271f81bb24cb489d902cfe39 100644
|
|
--- a/build.gradle.kts
|
|
+++ b/build.gradle.kts
|
|
@@ -30,6 +30,10 @@ dependencies {
|
|
alsoShade(log4jPlugins.output)
|
|
implementation("io.netty:netty-codec-haproxy:4.1.97.Final") // Paper - Add support for proxy protocol
|
|
// Paper end
|
|
+ // Leaves start - Linear format
|
|
+ implementation("com.github.luben:zstd-jni:1.5.5-11")
|
|
+ implementation("org.lz4:lz4-java:1.8.0")
|
|
+ // Leaves end - Linear format
|
|
implementation("org.apache.logging.log4j:log4j-iostreams:2.22.1") // Paper - remove exclusion
|
|
implementation("org.ow2.asm:asm-commons:9.7")
|
|
implementation("org.spongepowered:configurate-yaml:4.2.0-SNAPSHOT") // Paper - config files
|
|
diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
|
|
index 2096e57c025858519e7c46788993b9aac1ec60e8..d4fa12ce7d2482bf00229e2ea9e25a2d7f59e0ea 100644
|
|
--- a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
|
|
+++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
|
|
@@ -1077,9 +1077,9 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return this.getCache().doesRegionFileNotExistNoIO(new ChunkPos(chunkX, chunkZ));
|
|
}
|
|
|
|
- public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<RegionFile, T> function) {
|
|
+ public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<org.leavesmc.leaves.region.AbstractRegionFile, T> function) { // Leaves
|
|
final RegionFileStorage cache = this.getCache();
|
|
- final RegionFile regionFile;
|
|
+ final org.leavesmc.leaves.region.AbstractRegionFile regionFile; // Leaves
|
|
synchronized (cache) {
|
|
try {
|
|
regionFile = cache.getRegionFile(new ChunkPos(chunkX, chunkZ), existingOnly, true);
|
|
@@ -1092,19 +1092,19 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return function.apply(regionFile);
|
|
} finally {
|
|
if (regionFile != null) {
|
|
- regionFile.fileLock.unlock();
|
|
+ regionFile.getFileLock().unlock(); // Leaves
|
|
}
|
|
}
|
|
}
|
|
|
|
- public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function) {
|
|
+ public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<org.leavesmc.leaves.region.AbstractRegionFile, T> function) { // Leaves
|
|
final RegionFileStorage cache = this.getCache();
|
|
- final RegionFile regionFile;
|
|
+ final org.leavesmc.leaves.region.AbstractRegionFile regionFile; // Leaves
|
|
|
|
synchronized (cache) {
|
|
regionFile = cache.getRegionFileIfLoaded(new ChunkPos(chunkX, chunkZ));
|
|
if (regionFile != null) {
|
|
- regionFile.fileLock.lock();
|
|
+ regionFile.getFileLock().lock(); // Leaves
|
|
}
|
|
}
|
|
|
|
@@ -1112,7 +1112,7 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
|
|
return function.apply(regionFile);
|
|
} finally {
|
|
if (regionFile != null) {
|
|
- regionFile.fileLock.unlock();
|
|
+ regionFile.getFileLock().unlock(); // Leaves
|
|
}
|
|
}
|
|
}
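
The hunks above replace direct access to RegionFile.fileLock with the AbstractRegionFile.getFileLock() accessor introduced later in this patch, so the anvil and linear implementations expose the same ReentrantLock to the region I/O thread. A minimal sketch of that acquire/compute/release discipline, written only against the interface (hypothetical helper, not part of the patch):

// Sketch only: mirrors the locking pattern of computeForRegionFile above.
static <T> T withLockedRegionFile(org.leavesmc.leaves.region.AbstractRegionFile regionFile,
                                  java.util.function.Function<org.leavesmc.leaves.region.AbstractRegionFile, T> function) {
    regionFile.getFileLock().lock();
    try {
        return function.apply(regionFile);
    } finally {
        regionFile.getFileLock().unlock();
    }
}
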
diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
index 7dcdc9b40c594234d87bef3e75a68ddaa58506a3..6bbccbc2e114bf31a0ca3341437b7d5f466f858e 100644
|
|
--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
+++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
@@ -885,13 +885,13 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
}
|
|
|
|
public ChunkStatus getChunkStatusOnDiskIfCached(ChunkPos chunkPos) {
|
|
- net.minecraft.world.level.chunk.storage.RegionFile regionFile = regionFileCache.getRegionFileIfLoaded(chunkPos);
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionFile = regionFileCache.getRegionFileIfLoaded(chunkPos); // Leaves
|
|
|
|
return regionFile == null ? null : regionFile.getStatusIfCached(chunkPos.x, chunkPos.z);
|
|
}
|
|
|
|
public ChunkStatus getChunkStatusOnDisk(ChunkPos chunkPos) throws IOException {
|
|
- net.minecraft.world.level.chunk.storage.RegionFile regionFile = regionFileCache.getRegionFile(chunkPos, true);
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionFile = regionFileCache.getRegionFile(chunkPos, true); // Leaves
|
|
|
|
if (regionFile == null || !regionFileCache.chunkExists(chunkPos)) {
|
|
return null;
|
|
@@ -909,7 +909,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
}
|
|
|
|
public void updateChunkStatusOnDisk(ChunkPos chunkPos, @Nullable CompoundTag compound) throws IOException {
|
|
- net.minecraft.world.level.chunk.storage.RegionFile regionFile = regionFileCache.getRegionFile(chunkPos, false);
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionFile = regionFileCache.getRegionFile(chunkPos, false); // Leaves
|
|
|
|
regionFile.setStatus(chunkPos.x, chunkPos.z, ChunkSerializer.getStatus(compound));
|
|
}
|
|
diff --git a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
index 954d468459fe167ede0e7fca5b9f99da565d59e1..12055c5e16ddb58d9e3c6c6d13f85facb2b7ac5d 100644
|
|
--- a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
+++ b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
@@ -76,7 +76,7 @@ public class WorldUpgrader {
|
|
volatile int skipped;
|
|
final Reference2FloatMap<ResourceKey<Level>> progressMap = Reference2FloatMaps.synchronize(new Reference2FloatOpenHashMap());
|
|
volatile Component status = Component.translatable("optimizeWorld.stage.counting");
|
|
- static final Pattern REGEX = Pattern.compile("^r\\.(-?[0-9]+)\\.(-?[0-9]+)\\.mca$");
+ static final Pattern REGEX = Pattern.compile("^r\\.(-?[0-9]+)\\.(-?[0-9]+)\\.(linear|mca)$"); // Leaves
final DimensionDataStorage overworldDataStorage;
|
|
|
|
public WorldUpgrader(LevelStorageSource.LevelStorageAccess session, DataFixer dataFixer, RegistryAccess dynamicRegistryManager, boolean eraseCache, boolean recreateRegionFiles) {
|
|
@@ -406,7 +406,7 @@ public class WorldUpgrader {
|
|
|
|
private static List<WorldUpgrader.FileToUpgrade> getAllChunkPositions(RegionStorageInfo key, Path regionDirectory) {
|
|
File[] afile = regionDirectory.toFile().listFiles((file, s) -> {
|
|
- return s.endsWith(".mca");
|
|
+ return s.endsWith(".mca") || s.endsWith(".linear"); // Leaves
|
|
});
|
|
|
|
if (afile == null) {
|
|
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
|
index 1362a47943cf1a51a185a15094b1f74c94bf40ef..5dc0631ee8122f1a8473b6b1cf890cb567400e09 100644
|
|
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
|
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
|
@@ -28,7 +28,7 @@ import net.minecraft.nbt.NbtIo; // Paper
|
|
import net.minecraft.world.level.ChunkPos;
|
|
import org.slf4j.Logger;
|
|
|
|
-public class RegionFile implements AutoCloseable {
|
|
+public class RegionFile implements AutoCloseable, org.leavesmc.leaves.region.AbstractRegionFile { // Leaves
|
|
|
|
private static final Logger LOGGER = LogUtils.getLogger();
|
|
private static final int SECTOR_BYTES = 4096;
|
|
@@ -60,6 +60,16 @@ public class RegionFile implements AutoCloseable {
|
|
return sectors + (sign >>> 63);
|
|
}
|
|
|
|
+ // Leaves start - Abstract getters
|
|
+ public Path getRegionFile() {
|
|
+ return this.path;
|
|
+ }
|
|
+
|
|
+ public java.util.concurrent.locks.ReentrantLock getFileLock() {
|
|
+ return this.fileLock;
|
|
+ }
|
|
+ // Leaves end
|
|
+
|
|
private static final CompoundTag OVERSIZED_COMPOUND = new CompoundTag();
|
|
|
|
private CompoundTag attemptRead(long sector, int chunkDataLength, long fileLength) throws IOException {
|
|
@@ -130,7 +140,7 @@ public class RegionFile implements AutoCloseable {
|
|
}
|
|
|
|
// note: only call for CHUNK regionfiles
|
|
- boolean recalculateHeader() throws IOException {
|
|
+ public boolean recalculateHeader() throws IOException { // Leaves
|
|
if (!this.canRecalcHeader) {
|
|
return false;
|
|
}
|
|
@@ -972,10 +982,10 @@ public class RegionFile implements AutoCloseable {
|
|
private static int getChunkIndex(int x, int z) {
|
|
return (x & 31) + (z & 31) * 32;
|
|
}
|
|
- synchronized boolean isOversized(int x, int z) {
|
|
+ public synchronized boolean isOversized(int x, int z) { // Leaves
|
|
return this.oversized[getChunkIndex(x, z)] == 1;
|
|
}
|
|
- synchronized void setOversized(int x, int z, boolean oversized) throws IOException {
|
|
+ public synchronized void setOversized(int x, int z, boolean oversized) throws IOException { // Leaves
|
|
final int offset = getChunkIndex(x, z);
|
|
boolean previous = this.oversized[offset] == 1;
|
|
this.oversized[offset] = (byte) (oversized ? 1 : 0);
|
|
@@ -1014,7 +1024,7 @@ public class RegionFile implements AutoCloseable {
|
|
return this.path.getParent().resolve(this.path.getFileName().toString().replaceAll("\\.mca$", "") + "_oversized_" + x + "_" + z + ".nbt");
|
|
}
|
|
|
|
- synchronized CompoundTag getOversizedData(int x, int z) throws IOException {
|
|
+ public synchronized CompoundTag getOversizedData(int x, int z) throws IOException { // Leaves
|
|
Path file = getOversizedFile(x, z);
|
|
try (DataInputStream out = new DataInputStream(new java.io.BufferedInputStream(new InflaterInputStream(Files.newInputStream(file))))) {
|
|
return NbtIo.read((java.io.DataInput) out);
|
|
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
|
index 1090b7e36e3c1c105bc36135b82751c651f237d4..d24ec065c76f6852586328dd56fba311cf8ac8a8 100644
|
|
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
|
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
|
@@ -21,11 +21,15 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
public static final String ANVIL_EXTENSION = ".mca";
|
|
private static final int MAX_CACHE_SIZE = 256;
|
|
- public final Long2ObjectLinkedOpenHashMap<RegionFile> regionCache = new Long2ObjectLinkedOpenHashMap();
|
|
+ public final Long2ObjectLinkedOpenHashMap<org.leavesmc.leaves.region.AbstractRegionFile> regionCache = new Long2ObjectLinkedOpenHashMap(); // Leaves
|
|
private final RegionStorageInfo info;
|
|
private final Path folder;
|
|
private final boolean sync;
|
|
private final boolean isChunkData; // Paper
|
|
+ // Leaves start - region format
|
|
+ public final org.leavesmc.leaves.region.RegionFileFormat format;
|
|
+ public final int linearCompression;
|
|
+ // Leaves end
|
|
|
|
// Paper start - cache regionfile does not exist state
|
|
static final int MAX_NON_EXISTING_CACHE = 1024 * 64;
|
|
@@ -66,13 +70,17 @@ public class RegionFileStorage implements AutoCloseable {
|
|
this.folder = directory;
|
|
this.sync = dsync;
|
|
this.info = storageKey;
|
|
+ // Leaves start
|
|
+ this.format = org.leavesmc.leaves.LeavesConfig.regionFormatName;
|
|
+ this.linearCompression = org.leavesmc.leaves.LeavesConfig.linearCompressionLevel;
|
|
+ // Leaves end
|
|
}
|
|
|
|
// Paper start
|
|
@Nullable
|
|
public static ChunkPos getRegionFileCoordinates(Path file) {
|
|
String fileName = file.getFileName().toString();
|
|
- if (!fileName.startsWith("r.") || !fileName.endsWith(".mca")) {
+ if (!fileName.startsWith("r.") || (!fileName.endsWith(".mca") && !fileName.endsWith(".linear"))) { // Leaves
return null;
|
|
}
|
|
|
|
@@ -94,29 +102,29 @@ public class RegionFileStorage implements AutoCloseable {
|
|
// Paper end
|
|
|
|
// Paper start
|
|
- public synchronized RegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) {
|
|
+ public synchronized org.leavesmc.leaves.region.AbstractRegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) { // Leaves
|
|
return this.regionCache.getAndMoveToFirst(ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()));
|
|
}
|
|
|
|
public synchronized boolean chunkExists(ChunkPos pos) throws IOException {
|
|
- RegionFile regionfile = getRegionFile(pos, true);
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = getRegionFile(pos, true); // Leaves
|
|
|
|
return regionfile != null ? regionfile.hasChunk(pos) : false;
|
|
}
|
|
|
|
- public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit
|
|
+ public synchronized org.leavesmc.leaves.region.AbstractRegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit // Leaves
|
|
return this.getRegionFile(chunkcoordintpair, existingOnly, false);
|
|
}
|
|
- public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException {
|
|
+ public synchronized org.leavesmc.leaves.region.AbstractRegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException { // Leaves
|
|
// Paper end
|
|
long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()); final long regionPos = i; // Paper - OBFHELPER
|
|
- RegionFile regionfile = (RegionFile) this.regionCache.getAndMoveToFirst(i);
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = this.regionCache.getAndMoveToFirst(i); // Leaves
|
|
|
|
if (regionfile != null) {
|
|
// Paper start
|
|
if (lock) {
|
|
// must be in this synchronized block
|
|
- regionfile.fileLock.lock();
|
|
+ regionfile.getFileLock().lock(); // Leaves
|
|
}
|
|
// Paper end
|
|
return regionfile;
|
|
@@ -127,28 +135,40 @@ public class RegionFileStorage implements AutoCloseable {
|
|
}
|
|
// Paper end - cache regionfile does not exist state
|
|
if (this.regionCache.size() >= io.papermc.paper.configuration.GlobalConfiguration.get().misc.regionFileCacheSize) { // Paper - Sanitise RegionFileCache and make configurable
|
|
- ((RegionFile) this.regionCache.removeLast()).close();
|
|
+ this.regionCache.removeLast().close(); // Leaves
|
|
}
|
|
|
|
// Paper - only create directory if not existing only - moved down
|
|
Path path = this.folder;
|
|
int j = chunkcoordintpair.getRegionX();
|
|
- Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca"); // Paper - diff on change
|
|
- if (existingOnly && !java.nio.file.Files.exists(path1)) { // Paper start - cache regionfile does not exist state
|
|
- this.markNonExisting(regionPos);
|
|
- return null; // CraftBukkit
|
|
+ // Leaves start - Polyglot
|
|
+ Path path1;
|
|
+ if (existingOnly) {
|
|
+ Path anvil = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca");
|
|
+ Path linear = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".linear");
|
|
+ path1 = java.nio.file.Files.exists(linear) ? linear : java.nio.file.Files.exists(anvil) ? anvil : null;
|
|
+ if (path1 == null) {
|
|
+ markNonExisting(regionPos);
|
|
+ return null; // CraftBukkit
|
|
+ }
|
|
} else {
|
|
+ String extension = switch (this.format) {
|
|
+ case LINEAR -> "linear";
|
|
+ default -> "mca";
|
|
+ };
|
|
+ path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + "." + extension);
|
|
+ // Leaves end - Polyglot
|
|
this.createRegionFile(regionPos);
|
|
}
|
|
// Paper end - cache regionfile does not exist state
|
|
FileUtil.createDirectoriesSafe(this.folder); // Paper - only create directory if not existing only - moved from above
|
|
- RegionFile regionfile1 = new RegionFile(this.info, path1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile1 = org.leavesmc.leaves.region.AbstractRegionFileFactory.getAbstractRegionFile(this.linearCompression, this.info, path1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header // Leaves
|
|
|
|
this.regionCache.putAndMoveToFirst(i, regionfile1);
|
|
// Paper start
|
|
if (lock) {
|
|
// must be in this synchronized block
|
|
- regionfile1.fileLock.lock();
|
|
+ regionfile1.getFileLock().lock(); // Leaves
|
|
}
|
|
// Paper end
|
|
return regionfile1;
|
|
@@ -160,7 +180,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
org.apache.logging.log4j.LogManager.getLogger().fatal(msg + " (" + file.toString().replaceAll(".+[\\\\/]", "") + " - " + x + "," + z + ") Go clean it up to remove this message. /minecraft:tp " + (x<<4)+" 128 "+(z<<4) + " - DO NOT REPORT THIS TO PAPER - You may ask for help on Discord, but do not file an issue. These error messages can not be removed.");
|
|
}
|
|
|
|
- private static CompoundTag readOversizedChunk(RegionFile regionfile, ChunkPos chunkCoordinate) throws IOException {
|
|
+ private static CompoundTag readOversizedChunk(org.leavesmc.leaves.region.AbstractRegionFile regionfile, ChunkPos chunkCoordinate) throws IOException { // Leaves
|
|
synchronized (regionfile) {
|
|
try (DataInputStream datainputstream = regionfile.getChunkDataInputStream(chunkCoordinate)) {
|
|
CompoundTag oversizedData = regionfile.getOversizedData(chunkCoordinate.x, chunkCoordinate.z);
|
|
@@ -195,14 +215,14 @@ public class RegionFileStorage implements AutoCloseable {
|
|
@Nullable
|
|
public CompoundTag read(ChunkPos pos) throws IOException {
|
|
// CraftBukkit start - SPIGOT-5680: There's no good reason to preemptively create files on read, save that for writing
|
|
- RegionFile regionfile = this.getRegionFile(pos, true, true); // Paper
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = this.getRegionFile(pos, true, true); // Paper // Leaves
|
|
if (regionfile == null) {
|
|
return null;
|
|
}
|
|
// Paper start - Add regionfile parameter
|
|
return this.read(pos, regionfile);
|
|
}
|
|
- public CompoundTag read(ChunkPos pos, RegionFile regionfile) throws IOException {
|
|
+ public CompoundTag read(ChunkPos pos, org.leavesmc.leaves.region.AbstractRegionFile regionfile) throws IOException { // Leaves
|
|
// We add the regionfile parameter to avoid the potential deadlock (on fileLock) if we went back to obtain a regionfile
|
|
// if we decide to re-read
|
|
// Paper end
|
|
@@ -212,7 +232,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
// Paper start
|
|
if (regionfile.isOversized(pos.x, pos.z)) {
|
|
- printOversizedLog("Loading Oversized Chunk!", regionfile.getPath(), pos.x, pos.z);
|
|
+ printOversizedLog("Loading Oversized Chunk!", regionfile.getRegionFile(), pos.x, pos.z); // Leaves
|
|
return readOversizedChunk(regionfile, pos);
|
|
}
|
|
// Paper end
|
|
@@ -226,12 +246,12 @@ public class RegionFileStorage implements AutoCloseable {
|
|
if (this.isChunkData) {
|
|
ChunkPos chunkPos = ChunkSerializer.getChunkCoordinate(nbttagcompound);
|
|
if (!chunkPos.equals(pos)) {
|
|
- net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos + " but got chunk data for " + chunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionfile.getPath().toAbsolutePath());
|
|
+ net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos + " but got chunk data for " + chunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionfile.getRegionFile().toAbsolutePath()); // Leaves
|
|
if (regionfile.recalculateHeader()) {
|
|
- regionfile.fileLock.lock(); // otherwise we will unlock twice and only lock once.
|
|
+ regionfile.getFileLock().lock(); // otherwise we will unlock twice and only lock once. // Leaves
|
|
return this.read(pos, regionfile);
|
|
}
|
|
- net.minecraft.server.MinecraftServer.LOGGER.error("Can't recalculate regionfile header, regenerating chunk " + pos + " for " + regionfile.getPath().toAbsolutePath());
|
|
+ net.minecraft.server.MinecraftServer.LOGGER.error("Can't recalculate regionfile header, regenerating chunk " + pos + " for " + regionfile.getRegionFile().toAbsolutePath()); // Leaves
|
|
return null;
|
|
}
|
|
}
|
|
@@ -265,13 +285,13 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
return nbttagcompound;
|
|
} finally { // Paper start
|
|
- regionfile.fileLock.unlock();
|
|
+ regionfile.getFileLock().unlock(); // Leaves
|
|
} // Paper end
|
|
}
|
|
|
|
public void scanChunk(ChunkPos chunkPos, StreamTagVisitor scanner) throws IOException {
|
|
// CraftBukkit start - SPIGOT-5680: There's no good reason to preemptively create files on read, save that for writing
|
|
- RegionFile regionfile = this.getRegionFile(chunkPos, true);
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = this.getRegionFile(chunkPos, true); // Leaves
|
|
if (regionfile == null) {
|
|
return;
|
|
}
|
|
@@ -302,7 +322,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
|
|
protected void write(ChunkPos pos, @Nullable CompoundTag nbt) throws IOException {
|
|
// Paper start - rewrite chunk system
|
|
- RegionFile regionfile = this.getRegionFile(pos, nbt == null, true); // CraftBukkit
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = this.getRegionFile(pos, nbt == null, true); // CraftBukkit // Paper // Paper start - rewrite chunk system // Leaves
|
|
if (nbt == null && regionfile == null) {
|
|
return;
|
|
}
|
|
@@ -317,8 +337,33 @@ public class RegionFileStorage implements AutoCloseable {
|
|
if (nbt == null) {
|
|
regionfile.clear(pos);
|
|
} else {
|
|
- DataOutputStream dataoutputstream = regionfile.getChunkDataOutputStream(pos);
|
|
+ // Leaves start - auto convert anvil to linear
+ DataOutputStream dataoutputstream;
+
+ if (regionfile instanceof RegionFile && org.leavesmc.leaves.LeavesConfig.regionFormatName == org.leavesmc.leaves.region.RegionFileFormat.LINEAR && org.leavesmc.leaves.LeavesConfig.autoConvertAnvilToLinear) {
+ Path linearFilePath = Path.of(regionfile.getRegionFile().toString().replaceAll("\\.mca$", ".linear"));
+ try (org.leavesmc.leaves.region.LinearRegionFile linearRegionFile = new org.leavesmc.leaves.region.LinearRegionFile(linearFilePath, org.leavesmc.leaves.LeavesConfig.linearCompressionLevel)) {
+ DataInputStream regionDataInputStream = regionfile.getChunkDataInputStream(pos);
+ if (regionDataInputStream != null) { // nothing to copy if the chunk was never written to the anvil file
+ CompoundTag compoundTag = NbtIo.read(regionDataInputStream);
+ try (DataOutputStream linearDataOutputStream = linearRegionFile.getChunkDataOutputStream(pos)) {
+ NbtIo.write(compoundTag, linearDataOutputStream);
+ }
+ linearRegionFile.flush();
+ }
+
+ if (java.nio.file.Files.isRegularFile(regionfile.getRegionFile())) {
+ java.nio.file.Files.delete(regionfile.getRegionFile());
+ }
+
+ dataoutputstream = linearRegionFile.getChunkDataOutputStream(pos);
+ }
+ } else {
+ dataoutputstream = regionfile.getChunkDataOutputStream(pos);
+ }
+ // Leaves end - auto convert anvil to linear
try {
|
|
NbtIo.write(nbt, (DataOutput) dataoutputstream);
|
|
regionfile.setStatus(pos.x, pos.z, ChunkSerializer.getStatus(nbt)); // Paper - Cache chunk status
|
|
@@ -357,7 +402,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
// Paper end - Chunk save reattempt
|
|
// Paper start - rewrite chunk system
|
|
} finally {
|
|
- regionfile.fileLock.unlock();
|
|
+ regionfile.getFileLock().unlock(); // Leaves
|
|
}
|
|
// Paper end - rewrite chunk system
|
|
}
|
|
@@ -367,7 +412,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
ObjectIterator objectiterator = this.regionCache.values().iterator();
|
|
|
|
while (objectiterator.hasNext()) {
|
|
- RegionFile regionfile = (RegionFile) objectiterator.next();
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = (org.leavesmc.leaves.region.AbstractRegionFile) objectiterator.next(); // Leaves
|
|
|
|
try {
|
|
regionfile.close();
|
|
@@ -383,7 +428,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
ObjectIterator objectiterator = this.regionCache.values().iterator();
|
|
|
|
while (objectiterator.hasNext()) {
|
|
- RegionFile regionfile = (RegionFile) objectiterator.next();
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile regionfile = (org.leavesmc.leaves.region.AbstractRegionFile) objectiterator.next(); // Leaves
|
|
|
|
regionfile.flush();
|
|
}
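
With existingOnly set, the getRegionFile change above probes for an existing .linear file first and falls back to .mca; when a new file may be created, the extension follows the configured format. A standalone sketch of that resolution rule (hypothetical helper, not part of the patch):

// Sketch only: the "Polyglot" lookup performed by getRegionFile(pos, existingOnly = true).
static java.nio.file.Path resolveExistingRegionPath(java.nio.file.Path folder, int regionX, int regionZ) {
    java.nio.file.Path linear = folder.resolve("r." + regionX + "." + regionZ + ".linear");
    java.nio.file.Path anvil = folder.resolve("r." + regionX + "." + regionZ + ".mca");
    return java.nio.file.Files.exists(linear) ? linear : java.nio.file.Files.exists(anvil) ? anvil : null;
}
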
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
|
|
index 105a1fb70a7b869b65617a760b8de1ea86f3571f..1288be2fa570ff304c70c8da6af8a6d862c5bdb7 100644
|
|
--- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
|
|
+++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
|
|
@@ -609,7 +609,7 @@ public class CraftWorld extends CraftRegionAccessor implements World {
|
|
world.getChunk(x, z); // make sure we're at ticket level 32 or lower
|
|
return true;
|
|
}
|
|
- net.minecraft.world.level.chunk.storage.RegionFile file;
|
|
+ org.leavesmc.leaves.region.AbstractRegionFile file; // Leaves
|
|
try {
|
|
file = world.getChunkSource().chunkMap.regionFileCache.getRegionFile(chunkPos, false);
|
|
} catch (java.io.IOException ex) {
|
|
diff --git a/src/main/java/org/leavesmc/leaves/region/AbstractRegionFile.java b/src/main/java/org/leavesmc/leaves/region/AbstractRegionFile.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..2116a2e7e10e64aad1373171c6efde65de6ca198
|
|
--- /dev/null
|
|
+++ b/src/main/java/org/leavesmc/leaves/region/AbstractRegionFile.java
|
|
@@ -0,0 +1,45 @@
|
|
+package org.leavesmc.leaves.region;
|
|
+
|
|
+import net.minecraft.nbt.CompoundTag;
|
|
+import net.minecraft.world.level.ChunkPos;
|
|
+import net.minecraft.world.level.chunk.status.ChunkStatus;
|
|
+
|
|
+import java.io.DataInputStream;
|
|
+import java.io.DataOutputStream;
|
|
+import java.io.IOException;
|
|
+import java.nio.file.Path;
|
|
+import java.util.concurrent.locks.ReentrantLock;
|
|
+
|
|
+public interface AbstractRegionFile {
|
|
+
|
|
+ void flush() throws IOException;
|
|
+
|
|
+ void clear(ChunkPos pos) throws IOException;
|
|
+
|
|
+ void close() throws IOException;
|
|
+
|
|
+ void setStatus(int x, int z, ChunkStatus status);
|
|
+
|
|
+ void setOversized(int x, int z, boolean b) throws IOException;
|
|
+
|
|
+ boolean hasChunk(ChunkPos pos);
|
|
+
|
|
+ boolean doesChunkExist(ChunkPos pos) throws Exception;
|
|
+
|
|
+ boolean isOversized(int x, int z);
|
|
+
|
|
+ boolean recalculateHeader() throws IOException;
|
|
+
|
|
+ DataOutputStream getChunkDataOutputStream(ChunkPos pos) throws IOException;
|
|
+
|
|
+ DataInputStream getChunkDataInputStream(ChunkPos pos) throws IOException;
|
|
+
|
|
+ CompoundTag getOversizedData(int x, int z) throws IOException;
|
|
+
|
|
+ ChunkStatus getStatusIfCached(int x, int z);
|
|
+
|
|
+ ReentrantLock getFileLock();
|
|
+
|
|
+ Path getRegionFile();
|
|
+}
|
|
+
|
|
diff --git a/src/main/java/org/leavesmc/leaves/region/AbstractRegionFileFactory.java b/src/main/java/org/leavesmc/leaves/region/AbstractRegionFileFactory.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..8050b23fd624d2462200b621a6d711b144b2b46b
|
|
--- /dev/null
|
|
+++ b/src/main/java/org/leavesmc/leaves/region/AbstractRegionFileFactory.java
|
|
@@ -0,0 +1,31 @@
|
|
+package org.leavesmc.leaves.region;
|
|
+
|
|
+import net.minecraft.world.level.chunk.storage.RegionFile;
|
|
+import net.minecraft.world.level.chunk.storage.RegionFileVersion;
|
|
+import net.minecraft.world.level.chunk.storage.RegionStorageInfo;
|
|
+
|
|
+import java.io.IOException;
|
|
+import java.nio.file.Path;
|
|
+
|
|
+public class AbstractRegionFileFactory {
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, RegionStorageInfo storageKey, Path file, Path directory, boolean dsync) throws IOException {
|
|
+ return getAbstractRegionFile(linearCompression, storageKey, file, directory, RegionFileVersion.VERSION_DEFLATE, dsync);
|
|
+ }
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, RegionStorageInfo storageKey, Path file, Path directory, boolean dsync, boolean canRecalcHeader) throws IOException {
|
|
+ return getAbstractRegionFile(linearCompression, storageKey, file, directory, RegionFileVersion.VERSION_DEFLATE, dsync, canRecalcHeader);
|
|
+ }
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, RegionStorageInfo storageKey, Path file, Path directory, RegionFileVersion outputChunkStreamVersion, boolean dsync) throws IOException {
|
|
+ return getAbstractRegionFile(linearCompression, storageKey, file, directory, outputChunkStreamVersion, dsync, false);
|
|
+ }
|
|
+
|
|
+ public static AbstractRegionFile getAbstractRegionFile(int linearCompression, RegionStorageInfo storageKey, Path file, Path directory, RegionFileVersion outputChunkStreamVersion, boolean dsync, boolean canRecalcHeader) throws IOException {
|
|
+ if (file.toString().endsWith(".linear")) {
|
|
+ return new LinearRegionFile(file, linearCompression);
|
|
+ } else {
|
|
+ return new RegionFile(storageKey, file, directory, outputChunkStreamVersion, dsync, canRecalcHeader);
|
|
+ }
|
|
+ }
|
|
+}
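
The factory dispatches purely on the file extension: ".linear" paths get a LinearRegionFile, everything else the vanilla/Paper RegionFile. A caller-side sketch (hypothetical helper, not part of the patch; assumes the caller already holds the RegionStorageInfo and region folder the server uses, and that zstd level 1 is acceptable for the linear case):

// Sketch only: read one chunk through the format-agnostic abstraction.
static net.minecraft.nbt.CompoundTag readChunk(net.minecraft.world.level.chunk.storage.RegionStorageInfo info,
                                               java.nio.file.Path regionPath, java.nio.file.Path folder,
                                               net.minecraft.world.level.ChunkPos pos) throws java.io.IOException {
    AbstractRegionFile region = AbstractRegionFileFactory.getAbstractRegionFile(1, info, regionPath, folder, true, false);
    try (java.io.DataInputStream in = region.getChunkDataInputStream(pos)) {
        return in == null ? null : net.minecraft.nbt.NbtIo.read(in);
    } finally {
        region.close();
    }
}
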
diff --git a/src/main/java/org/leavesmc/leaves/region/LinearRegionFile.java b/src/main/java/org/leavesmc/leaves/region/LinearRegionFile.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..dd57eea25d8a52b298c49e47f06108cbf8150160
|
|
--- /dev/null
|
|
+++ b/src/main/java/org/leavesmc/leaves/region/LinearRegionFile.java
|
|
@@ -0,0 +1,330 @@
|
|
+package org.leavesmc.leaves.region;
|
|
+
|
|
+import com.github.luben.zstd.ZstdInputStream;
|
|
+import com.github.luben.zstd.ZstdOutputStream;
|
|
+import com.mojang.logging.LogUtils;
|
|
+import net.jpountz.lz4.LZ4Compressor;
|
|
+import net.jpountz.lz4.LZ4Factory;
|
|
+import net.jpountz.lz4.LZ4FastDecompressor;
|
|
+import net.minecraft.nbt.CompoundTag;
|
|
+import net.minecraft.world.level.ChunkPos;
|
|
+import net.minecraft.world.level.chunk.status.ChunkStatus;
|
|
+import org.slf4j.Logger;
|
|
+
|
|
+import javax.annotation.Nullable;
|
|
+import java.io.*;
|
|
+import java.nio.ByteBuffer;
|
|
+import java.nio.file.Files;
|
|
+import java.nio.file.Path;
|
|
+import java.nio.file.StandardCopyOption;
|
|
+import java.util.ArrayList;
|
|
+import java.util.Arrays;
|
|
+import java.util.List;
|
|
+import java.util.concurrent.atomic.AtomicBoolean;
|
|
+import java.util.concurrent.locks.ReentrantLock;
|
|
+
|
|
+// Powered by LinearPurpur(https://github.com/StupidCraft/LinearPurpur)
|
|
+public class LinearRegionFile implements AbstractRegionFile, AutoCloseable {
|
|
+
|
|
+ private static final long SUPERBLOCK = -4323716122432332390L;
|
|
+ private static final byte VERSION = 2;
|
|
+ private static final int HEADER_SIZE = 32;
|
|
+ private static final int FOOTER_SIZE = 8;
|
|
+ private static final Logger LOGGER = LogUtils.getLogger();
|
|
+ private static final List<Byte> SUPPORTED_VERSIONS = Arrays.asList((byte) 1, (byte) 2);
|
|
+ private static final LinearRegionFileFlusher linearRegionFileFlusher = new LinearRegionFileFlusher();
|
|
+
|
|
+ private final byte[][] buffer = new byte[1024][];
|
|
+ private final int[] bufferUncompressedSize = new int[1024];
|
|
+
|
|
+ private final int[] chunkTimestamps = new int[1024];
|
|
+ private final ChunkStatus[] statuses = new ChunkStatus[1024];
|
|
+
|
|
+ private final LZ4Compressor compressor;
|
|
+ private final LZ4FastDecompressor decompressor;
|
|
+
|
|
+ public final ReentrantLock fileLock = new ReentrantLock(true);
|
|
+ private final int compressionLevel;
|
|
+
|
|
+ private final AtomicBoolean markedToSave = new AtomicBoolean(false);
|
|
+ public boolean closed = false;
|
|
+ public Path path;
|
|
+
|
|
+
|
|
+ public LinearRegionFile(Path file, int compression) throws IOException {
|
|
+ this.path = file;
|
|
+ this.compressionLevel = compression;
|
|
+ this.compressor = LZ4Factory.fastestInstance().fastCompressor();
|
|
+ this.decompressor = LZ4Factory.fastestInstance().fastDecompressor();
|
|
+
|
|
+ File regionFile = new File(this.path.toString());
|
|
+
|
|
+ Arrays.fill(this.bufferUncompressedSize, 0);
|
|
+
|
|
+ if (!regionFile.canRead()) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ try (FileInputStream fileStream = new FileInputStream(regionFile);
|
|
+ DataInputStream rawDataStream = new DataInputStream(fileStream)) {
|
|
+
|
|
+ long superBlock = rawDataStream.readLong();
|
|
+ if (superBlock != SUPERBLOCK) {
|
|
+ throw new RuntimeException("Invalid superblock: " + superBlock + " in " + file);
|
|
+ }
|
|
+
|
|
+ byte version = rawDataStream.readByte();
|
|
+ if (!SUPPORTED_VERSIONS.contains(version)) {
|
|
+ throw new RuntimeException("Invalid version: " + version + " in " + file);
|
|
+ }
|
|
+
|
|
+ // Skip newestTimestamp (Long) + Compression level (Byte) + Chunk count (Short): Unused.
|
|
+ rawDataStream.skipBytes(11);
|
|
+
|
|
+ int dataCount = rawDataStream.readInt();
|
|
+ long fileLength = file.toFile().length();
|
|
+ if (fileLength != HEADER_SIZE + dataCount + FOOTER_SIZE) {
|
|
+ throw new IOException("Invalid file length: " + this.path + " " + fileLength + " " + (HEADER_SIZE + dataCount + FOOTER_SIZE));
|
|
+ }
|
|
+
|
|
+ rawDataStream.skipBytes(8); // Skip data hash (Long): Unused.
|
|
+
|
|
+ byte[] rawCompressed = new byte[dataCount];
|
|
+ rawDataStream.readFully(rawCompressed, 0, dataCount);
|
|
+
|
|
+ superBlock = rawDataStream.readLong();
|
|
+ if (superBlock != SUPERBLOCK) {
|
|
+ throw new IOException("Footer superblock invalid " + this.path);
|
|
+ }
|
|
+
|
|
+ try (DataInputStream dataStream = new DataInputStream(new ZstdInputStream(new ByteArrayInputStream(rawCompressed)))) {
|
|
+ int[] starts = new int[1024];
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ starts[i] = dataStream.readInt();
|
|
+ dataStream.skipBytes(4); // Skip timestamps (Int): Unused.
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ if (starts[i] > 0) {
|
|
+ int size = starts[i];
|
|
+ byte[] b = new byte[size];
|
|
+ dataStream.readFully(b, 0, size);
|
|
+
|
|
+ int maxCompressedLength = this.compressor.maxCompressedLength(size);
|
|
+ byte[] compressed = new byte[maxCompressedLength];
|
|
+ int compressedLength = this.compressor.compress(b, 0, size, compressed, 0, maxCompressedLength);
|
|
+ b = new byte[compressedLength];
|
|
+ System.arraycopy(compressed, 0, b, 0, compressedLength);
|
|
+
|
|
+ this.buffer[i] = b;
|
|
+ this.bufferUncompressedSize[i] = size;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public Path getRegionFile() {
|
|
+ return this.path;
|
|
+ }
|
|
+
|
|
+ public ReentrantLock getFileLock() {
|
|
+ return this.fileLock;
|
|
+ }
|
|
+
|
|
+ public void flush() throws IOException {
|
|
+ if (isMarkedToSave()) flushWrapper(); // sync
|
|
+ }
|
|
+
|
|
+ private void markToSave() {
|
|
+ linearRegionFileFlusher.scheduleSave(this);
|
|
+ markedToSave.set(true);
|
|
+ }
|
|
+
|
|
+ public boolean isMarkedToSave() {
|
|
+ return markedToSave.getAndSet(false);
|
|
+ }
|
|
+
|
|
+ public void flushWrapper() {
|
|
+ try {
|
|
+ save();
|
|
+ } catch (IOException e) {
|
|
+ LOGGER.error("Failed to flush region file " + path.toAbsolutePath(), e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public boolean doesChunkExist(ChunkPos pos) throws Exception {
|
|
+ throw new Exception("doesChunkExist is a stub");
|
|
+ }
|
|
+
|
|
+ private synchronized void save() throws IOException {
|
|
+ long timestamp = getTimestamp();
|
|
+ short chunkCount = 0;
|
|
+
|
|
+ File tempFile = new File(path.toString() + ".tmp");
|
|
+
|
|
+ try (FileOutputStream fileStream = new FileOutputStream(tempFile);
|
|
+ ByteArrayOutputStream zstdByteArray = new ByteArrayOutputStream();
|
|
+ ZstdOutputStream zstdStream = new ZstdOutputStream(zstdByteArray, this.compressionLevel);
|
|
+ DataOutputStream zstdDataStream = new DataOutputStream(zstdStream);
|
|
+ DataOutputStream dataStream = new DataOutputStream(fileStream)) {
|
|
+
|
|
+ dataStream.writeLong(SUPERBLOCK);
|
|
+ dataStream.writeByte(VERSION);
|
|
+ dataStream.writeLong(timestamp);
|
|
+ dataStream.writeByte(this.compressionLevel);
|
|
+
|
|
+ ArrayList<byte[]> byteBuffers = new ArrayList<>();
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ if (this.bufferUncompressedSize[i] != 0) {
|
|
+ chunkCount += 1;
|
|
+ byte[] content = new byte[bufferUncompressedSize[i]];
|
|
+ this.decompressor.decompress(buffer[i], 0, content, 0, bufferUncompressedSize[i]);
|
|
+
|
|
+ byteBuffers.add(content);
|
|
+ } else {
|
|
+ byteBuffers.add(null);
|
|
+ }
|
|
+ }
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ zstdDataStream.writeInt(this.bufferUncompressedSize[i]); // Write uncompressed size
|
|
+ zstdDataStream.writeInt(this.chunkTimestamps[i]); // Write timestamp
|
|
+ }
|
|
+ for (int i = 0; i < 1024; i++) {
|
|
+ if (byteBuffers.get(i) != null) {
|
|
+ zstdDataStream.write(byteBuffers.get(i), 0, byteBuffers.get(i).length);
|
|
+ }
|
|
+ }
|
|
+ zstdDataStream.close();
|
|
+
|
|
+ dataStream.writeShort(chunkCount);
|
|
+
|
|
+ byte[] compressed = zstdByteArray.toByteArray();
|
|
+
|
|
+ dataStream.writeInt(compressed.length);
|
|
+ dataStream.writeLong(0);
|
|
+
|
|
+ dataStream.write(compressed, 0, compressed.length);
|
|
+ dataStream.writeLong(SUPERBLOCK);
|
|
+
|
|
+ dataStream.flush();
|
|
+ fileStream.getFD().sync();
|
|
+ fileStream.getChannel().force(true); // Ensure atomicity on Btrfs
|
|
+ }
|
|
+ Files.move(tempFile.toPath(), this.path, StandardCopyOption.REPLACE_EXISTING);
|
|
+ }
|
|
+
|
|
+
|
|
+ public void setStatus(int x, int z, ChunkStatus status) {
|
|
+ this.statuses[getChunkIndex(x, z)] = status;
|
|
+ }
|
|
+
|
|
+ public synchronized void write(ChunkPos pos, ByteBuffer buffer) {
|
|
+ try {
|
|
+ byte[] b = toByteArray(new ByteArrayInputStream(buffer.array()));
|
|
+ int uncompressedSize = b.length;
|
|
+
|
|
+ int maxCompressedLength = this.compressor.maxCompressedLength(b.length);
|
|
+ byte[] compressed = new byte[maxCompressedLength];
|
|
+ int compressedLength = this.compressor.compress(b, 0, b.length, compressed, 0, maxCompressedLength);
|
|
+ b = new byte[compressedLength];
|
|
+ System.arraycopy(compressed, 0, b, 0, compressedLength);
|
|
+
|
|
+ int index = getChunkIndex(pos.x, pos.z);
|
|
+ this.buffer[index] = b;
|
|
+ this.chunkTimestamps[index] = getTimestamp();
|
|
+ this.bufferUncompressedSize[getChunkIndex(pos.x, pos.z)] = uncompressedSize;
|
|
+ } catch (IOException e) {
|
|
+ LOGGER.error("Chunk write IOException " + e + " " + this.path);
|
|
+ }
|
|
+ markToSave();
|
|
+ }
|
|
+
|
|
+ public DataOutputStream getChunkDataOutputStream(ChunkPos pos) {
|
|
+ return new DataOutputStream(new BufferedOutputStream(new ChunkBuffer(pos)));
|
|
+ }
|
|
+
|
|
+ private class ChunkBuffer extends ByteArrayOutputStream {
|
|
+
|
|
+ private final ChunkPos pos;
|
|
+
|
|
+ public ChunkBuffer(ChunkPos chunkcoordintpair) {
|
|
+ super();
|
|
+ this.pos = chunkcoordintpair;
|
|
+ }
|
|
+
|
|
+ public void close() {
|
|
+ ByteBuffer bytebuffer = ByteBuffer.wrap(this.buf, 0, this.count);
|
|
+ LinearRegionFile.this.write(this.pos, bytebuffer);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private byte[] toByteArray(InputStream in) throws IOException {
|
|
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
|
|
+ byte[] tempBuffer = new byte[4096];
|
|
+
|
|
+ int length;
|
|
+ while ((length = in.read(tempBuffer)) >= 0) {
|
|
+ out.write(tempBuffer, 0, length);
|
|
+ }
|
|
+
|
|
+ return out.toByteArray();
|
|
+ }
|
|
+
|
|
+ @Nullable
|
|
+ public synchronized DataInputStream getChunkDataInputStream(ChunkPos pos) {
|
|
+ if (this.bufferUncompressedSize[getChunkIndex(pos.x, pos.z)] != 0) {
|
|
+ byte[] content = new byte[bufferUncompressedSize[getChunkIndex(pos.x, pos.z)]];
|
|
+ this.decompressor.decompress(this.buffer[getChunkIndex(pos.x, pos.z)], 0, content, 0, bufferUncompressedSize[getChunkIndex(pos.x, pos.z)]);
|
|
+ return new DataInputStream(new ByteArrayInputStream(content));
|
|
+ }
|
|
+ return null;
|
|
+ }
|
|
+
|
|
+ public ChunkStatus getStatusIfCached(int x, int z) {
|
|
+ return this.statuses[getChunkIndex(x, z)];
|
|
+ }
|
|
+
|
|
+ public void clear(ChunkPos pos) {
|
|
+ int i = getChunkIndex(pos.x, pos.z);
|
|
+ this.buffer[i] = null;
|
|
+ this.bufferUncompressedSize[i] = 0;
|
|
+ this.chunkTimestamps[i] = getTimestamp();
|
|
+ markToSave();
|
|
+ }
|
|
+
|
|
+ public boolean hasChunk(ChunkPos pos) {
|
|
+ return this.bufferUncompressedSize[getChunkIndex(pos.x, pos.z)] > 0;
|
|
+ }
|
|
+
|
|
+ public void close() throws IOException {
|
|
+ if (closed) {
|
|
+ return;
|
|
+ }
|
|
+ closed = true;
|
|
+ flush(); // sync
|
|
+ }
|
|
+
|
|
+ private static int getChunkIndex(int x, int z) {
|
|
+ return (x & 31) + ((z & 31) << 5);
|
|
+ }
|
|
+
|
|
+ private static int getTimestamp() {
|
|
+ return (int) (System.currentTimeMillis() / 1000L);
|
|
+ }
|
|
+
|
|
+ public boolean recalculateHeader() {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public void setOversized(int x, int z, boolean something) {
|
|
+ }
|
|
+
|
|
+ public CompoundTag getOversizedData(int x, int z) throws IOException {
|
|
+ throw new IOException("getOversizedData is a stub " + this.path);
|
|
+ }
|
|
+
|
|
+ public boolean isOversized(int x, int z) {
|
|
+ return false;
|
|
+ }
|
|
+}
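
save() above writes a fixed 32-byte header (superblock, version, newest timestamp, compression level, chunk count, compressed length, unused hash), then one zstd-compressed block containing the 1024 per-chunk (uncompressed size, timestamp) pairs followed by the raw chunk payloads, and finally the superblock again as a footer. A small inspection sketch, assuming a readable .linear file produced by this class (not part of the patch):

// Sketch only: dump the header fields in the exact order save() writes them.
static void printLinearHeader(java.nio.file.Path file) throws java.io.IOException {
    try (java.io.DataInputStream in = new java.io.DataInputStream(java.nio.file.Files.newInputStream(file))) {
        System.out.println("superblock:        " + in.readLong());  // expected -4323716122432332390L
        System.out.println("version:           " + in.readByte());  // 1 or 2
        System.out.println("newest timestamp:  " + in.readLong());
        System.out.println("compression level: " + in.readByte());
        System.out.println("chunk count:       " + in.readShort());
        System.out.println("data length:       " + in.readInt());
        System.out.println("data hash:         " + in.readLong());  // written as 0, unused on read
    }
}
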
diff --git a/src/main/java/org/leavesmc/leaves/region/LinearRegionFileFlusher.java b/src/main/java/org/leavesmc/leaves/region/LinearRegionFileFlusher.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..d452d704c54b211be6becd41d12862ba33553c1f
|
|
--- /dev/null
|
|
+++ b/src/main/java/org/leavesmc/leaves/region/LinearRegionFileFlusher.java
|
|
@@ -0,0 +1,53 @@
|
|
+package org.leavesmc.leaves.region;
|
|
+
|
|
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
|
+
|
|
+import java.util.Queue;
|
|
+import java.util.concurrent.*;
|
|
+
|
|
+import org.bukkit.Bukkit;
|
|
+import org.leavesmc.leaves.LeavesConfig;
|
|
+import org.leavesmc.leaves.LeavesLogger;
|
|
+
|
|
+// Powered by LinearPurpur(https://github.com/StupidCraft/LinearPurpur)
|
|
+public class LinearRegionFileFlusher {
|
|
+ private final Queue<LinearRegionFile> savingQueue = new LinkedBlockingQueue<>();
|
|
+ private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
|
|
+ new ThreadFactoryBuilder()
|
|
+ .setNameFormat("linear-flush-scheduler")
|
|
+ .build()
|
|
+ );
|
|
+ private final ExecutorService executor = Executors.newFixedThreadPool(
|
|
+ LeavesConfig.getLinearFlushThreads(),
|
|
+ new ThreadFactoryBuilder()
|
|
+ .setNameFormat("linear-flusher-%d")
|
|
+ .build()
|
|
+ );
|
|
+
|
|
+ public LinearRegionFileFlusher() {
|
|
+ LeavesLogger.LOGGER.info("Using " + LeavesConfig.getLinearFlushThreads() + " threads for linear region flushing.");
|
|
+ scheduler.scheduleAtFixedRate(this::pollAndFlush, 0L, LeavesConfig.linearFlushFrequency, TimeUnit.SECONDS);
|
|
+ }
|
|
+
|
|
+ public void scheduleSave(LinearRegionFile regionFile) {
|
|
+ if (savingQueue.contains(regionFile)) {
|
|
+ return;
|
|
+ }
|
|
+ savingQueue.add(regionFile);
|
|
+ }
|
|
+
|
|
+ private void pollAndFlush() {
|
|
+ while (!savingQueue.isEmpty()) {
|
|
+ LinearRegionFile regionFile = savingQueue.poll();
|
|
+ if (!regionFile.closed && regionFile.isMarkedToSave()) {
|
|
+ executor.execute(regionFile::flushWrapper);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void shutdown() {
|
|
+ executor.shutdown();
|
|
+ scheduler.shutdown();
|
|
+ }
|
|
+}
|
|
+
|
|
diff --git a/src/main/java/org/leavesmc/leaves/region/RegionFileFormat.java b/src/main/java/org/leavesmc/leaves/region/RegionFileFormat.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..3651246acf3dd786eb6a85c7a846a248962cdd7f
|
|
--- /dev/null
|
|
+++ b/src/main/java/org/leavesmc/leaves/region/RegionFileFormat.java
|
|
@@ -0,0 +1,14 @@
|
|
+package org.leavesmc.leaves.region;
|
|
+
|
|
+public enum RegionFileFormat {
|
|
+ ANVIL, LINEAR, INVALID;
|
|
+
|
|
+ public static RegionFileFormat fromString(String format) {
|
|
+ for (RegionFileFormat regionFileFormat : values()) {
|
|
+ if (regionFileFormat.name().equalsIgnoreCase(format)) {
|
|
+ return regionFileFormat;
|
|
+ }
|
|
+ }
|
|
+ return RegionFileFormat.INVALID;
|
|
+ }
|
|
+}
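
fromString matches names case-insensitively and returns INVALID for anything unrecognised, so config parsing can reject a typo instead of silently falling back to a default. A trivial usage sketch (illustration only, not part of the patch):

// Sketch only: expected mappings of config strings to formats.
public static void main(String[] args) {
    System.out.println(RegionFileFormat.fromString("Linear"));   // LINEAR
    System.out.println(RegionFileFormat.fromString("anvil"));    // ANVIL
    System.out.println(RegionFileFormat.fromString("mcregion")); // INVALID
}
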