mirror of https://github.com/Winds-Studio/Leaf.git synced 2025-12-26 10:29:13 +00:00
Leaf/leaf-server/minecraft-patches/features/0201-Bulk-writes-to-writeLongArray-during-chunk-loading.patch
Dreeam 9a4efaa230 Drop patch that causes performance regression
Originally the vanilla logic used a stream; since 1.21.4, Mojang has switched it to Guava's
Collections2, which is much faster than either a stream or manually adding elements to a new
ArrayList. Filling a new ArrayList requires allocating a fresh backing object array, whereas
Collections2 evaluates the filter condition lazily during iteration, so it performs much better.
2025-08-04 19:25:56 +08:00
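
To illustrate the commit message above, here is a minimal, self-contained sketch (hypothetical data, not Leaf or vanilla code; it assumes Guava on the classpath) contrasting the three approaches it compares:

import com.google.common.collect.Collections2;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class FilterComparison {
    public static void main(String[] args) {
        List<Integer> values = List.of(3, -1, 7, 0, 42);

        // Stream pipeline (pre-1.21.4 vanilla style): builds pipeline objects and
        // collects the matches into a brand-new list.
        List<Integer> viaStream = values.stream()
                .filter(v -> v > 0)
                .collect(Collectors.toList());

        // Manual copy: the new ArrayList allocates a fresh backing Object[] up front.
        List<Integer> viaLoop = new ArrayList<>();
        for (Integer v : values) {
            if (v > 0) {
                viaLoop.add(v);
            }
        }

        // Guava's Collections2.filter: a lazy view over the source collection.
        // Nothing is copied; the predicate runs only as the view is iterated.
        Collection<Integer> viaGuava = Collections2.filter(values, v -> v > 0);

        System.out.println(viaStream + " " + viaLoop + " " + viaGuava);
    }
}

The lazy view is what avoids the up-front allocation the message refers to: the two eager variants each materialize a new collection, while the Collections2 view defers filtering to iteration time.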


From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Taiyou06 <kaandindar21@gmail.com>
Date: Fri, 21 Feb 2025 15:06:55 +0100
Subject: [PATCH] Bulk writes to writeLongArray during chunk loading

diff --git a/net/minecraft/network/FriendlyByteBuf.java b/net/minecraft/network/FriendlyByteBuf.java
index 8817aee7eb61e130aacc4f0df980036b92500ad1..a2b2f6f9ea9e3f0802512c62c0e44875816fb748 100644
--- a/net/minecraft/network/FriendlyByteBuf.java
+++ b/net/minecraft/network/FriendlyByteBuf.java
@@ -377,6 +377,50 @@ public class FriendlyByteBuf extends ByteBuf {
         }
     }
 
+    // Leaf start - Bulk writes to writeLongArray during chunk loading
+    public static void writeLongArray(FriendlyByteBuf buffer, long[] array) {
+        VarInt.write(buffer, array.length);
+        writeFixedSizeLongArray(buffer, array);
+    }
+
+    public static void writeFixedSizeLongArray(FriendlyByteBuf buffer, long[] array) {
+        if (array.length == 0) {
+            return;
+        }
+        int neededBytes = array.length * Long.BYTES;
+        int maxWritableBytes = buffer.source.maxWritableBytes();
+
+        if (maxWritableBytes >= neededBytes) {
+            buffer.source.ensureWritable(neededBytes);
+            int writerIndex = buffer.source.writerIndex();
+
+            if (buffer.source.hasArray()) {
+                byte[] dest = buffer.source.array();
+                int offset = buffer.source.arrayOffset() + writerIndex;
+
+                ByteBuffer buf = ByteBuffer.wrap(dest, offset, neededBytes).order(buffer.source.order());
+                buf.asLongBuffer().put(array);
+                buffer.source.writerIndex(writerIndex + neededBytes);
+            } else if (buffer.source.nioBufferCount() > 0) {
+                ByteBuffer nioBuf = buffer.source.nioBuffer(writerIndex, neededBytes);
+                nioBuf.asLongBuffer().put(array);
+                buffer.source.writerIndex(writerIndex + neededBytes);
+            } else {
+                ByteBuffer temp = ByteBuffer.allocate(neededBytes).order(buffer.source.order());
+                temp.asLongBuffer().put(array);
+                temp.rewind();
+                buffer.source.writeBytes(temp);
+            }
+        } else {
+            // Not enough space even at max capacity, use traditional approach
+            // which will write each element individually (and handle growing the buffer as needed)
+            for (long l : array) {
+                buffer.writeLong(l);
+            }
+        }
+    }
+    // Leaf end - Bulk writes to writeLongArray during chunk loading
+
     public long[] readLongArray() {
         return readLongArray(this);
     }
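
For reference, the same bulk-copy technique can be sketched outside the patch, against Netty's ByteBuf directly (a minimal, hypothetical BulkLongWriter; the names and standalone shape are illustrative, not Leaf's API). It makes the three fast-path branches explicit: heap buffers expose a backing byte[], direct buffers expose an NIO view, and everything else is staged through a temporary buffer:

import io.netty.buffer.ByteBuf;
import java.nio.ByteBuffer;

public final class BulkLongWriter {

    // Writes the whole long[] with one bulk copy when the buffer can ever hold it,
    // mirroring the fallback structure of the patch above.
    public static void writeLongsBulk(ByteBuf buf, long[] array) {
        if (array.length == 0) {
            return;
        }
        int neededBytes = array.length * Long.BYTES;

        if (buf.maxWritableBytes() >= neededBytes) {
            buf.ensureWritable(neededBytes);
            int writerIndex = buf.writerIndex();

            if (buf.hasArray()) {
                // Heap buffer: wrap the backing array and blit through a LongBuffer view.
                ByteBuffer view = ByteBuffer
                        .wrap(buf.array(), buf.arrayOffset() + writerIndex, neededBytes)
                        .order(buf.order());
                view.asLongBuffer().put(array);
                buf.writerIndex(writerIndex + neededBytes);
            } else if (buf.nioBufferCount() > 0) {
                // Direct buffer: copy through its NIO view, no intermediate byte[].
                buf.nioBuffer(writerIndex, neededBytes).asLongBuffer().put(array);
                buf.writerIndex(writerIndex + neededBytes);
            } else {
                // No accessible array or NIO view (e.g. some composite buffers):
                // stage the longs in a temporary ByteBuffer and let Netty copy it in.
                ByteBuffer temp = ByteBuffer.allocate(neededBytes).order(buf.order());
                temp.asLongBuffer().put(array);
                temp.rewind();
                buf.writeBytes(temp);
            }
        } else {
            // Cannot fit even at max capacity: fall back to per-element writes,
            // which grow or fail exactly as the traditional path would.
            for (long l : array) {
                buf.writeLong(l);
            }
        }
    }
}

The payoff during chunk loading comes from the length being known up front: a VarInt prefix plus an 8 * length byte block lets one bulk LongBuffer put replace length individual writeLong calls, each of which would otherwise pay its own bounds check.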