diff --git a/leaf-server/minecraft-patches/features/0128-Improve-sorting-in-SortedArraySet.patch b/leaf-server/minecraft-patches/features/0127-Improve-sorting-in-SortedArraySet.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0128-Improve-sorting-in-SortedArraySet.patch
rename to leaf-server/minecraft-patches/features/0127-Improve-sorting-in-SortedArraySet.patch
diff --git a/leaf-server/minecraft-patches/features/0127-Optimize-AABB.patch b/leaf-server/minecraft-patches/features/0127-Optimize-AABB.patch
deleted file mode 100644
index cfc2bc58..00000000
--- a/leaf-server/minecraft-patches/features/0127-Optimize-AABB.patch
+++ /dev/null
@@ -1,78 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Taiyou06
-Date: Sun, 16 Feb 2025 19:03:23 +0100
-Subject: [PATCH] Optimize AABB
-
-Pretty minor stuff but, it improves AABB.intersect by around ~5%
-
-diff --git a/net/minecraft/world/phys/AABB.java b/net/minecraft/world/phys/AABB.java
-index f64c04b32dd2d0fe143fc8bf9f498e52beb66a58..00daaff66bd26e9ca15a7eb4052ff38f9e662f7b 100644
---- a/net/minecraft/world/phys/AABB.java
-+++ b/net/minecraft/world/phys/AABB.java
-@@ -220,13 +220,16 @@ public class AABB {
-     }
- 
-     public AABB intersect(AABB other) {
--        double max = Math.max(this.minX, other.minX);
--        double max1 = Math.max(this.minY, other.minY);
--        double max2 = Math.max(this.minZ, other.minZ);
--        double min = Math.min(this.maxX, other.maxX);
--        double min1 = Math.min(this.maxY, other.maxY);
--        double min2 = Math.min(this.maxZ, other.maxZ);
--        return new AABB(max, max1, max2, min, min1, min2);
-+        // Leaf start - Optimize AABB
-+        return new AABB(
-+            this.minX > other.minX ? this.minX : other.minX,
-+            this.minY > other.minY ? this.minY : other.minY,
-+            this.minZ > other.minZ ? this.minZ : other.minZ,
-+            this.maxX < other.maxX ? this.maxX : other.maxX,
-+            this.maxY < other.maxY ? this.maxY : other.maxY,
-+            this.maxZ < other.maxZ ? this.maxZ : other.maxZ
-+        );
-+        // Leaf end - Optimize AABB
-     }
- 
-     public AABB minmax(AABB other) {
-@@ -258,16 +261,39 @@ public class AABB {
-     }
- 
-     public boolean intersects(AABB other) {
--        return this.intersects(other.minX, other.minY, other.minZ, other.maxX, other.maxY, other.maxZ);
-+        // Leaf start - Optimize AABB
-+        // Removed redundant method call overhead
-+        return this.minX < other.maxX &&
-+               this.maxX > other.minX &&
-+               this.minY < other.maxY &&
-+               this.maxY > other.minY &&
-+               this.minZ < other.maxZ &&
-+               this.maxZ > other.minZ;
-+        // Leaf end - Optimize AABB
-     }
- 
-     public boolean intersects(double x1, double y1, double z1, double x2, double y2, double z2) {
--        return this.minX < x2 && this.maxX > x1 && this.minY < y2 && this.maxY > y1 && this.minZ < z2 && this.maxZ > z1;
-+        // Leaf start - Optimize AABB
-+        // No temporary variables needed, direct comparison
-+        return this.minX < x2 &&
-+               this.maxX > x1 &&
-+               this.minY < y2 &&
-+               this.maxY > y1 &&
-+               this.minZ < z2 &&
-+               this.maxZ > z1;
-+        // Leaf end - Optimize AABB
-     }
- 
-     public boolean intersects(Vec3 min, Vec3 max) {
-         return this.intersects(
--            Math.min(min.x, max.x), Math.min(min.y, max.y), Math.min(min.z, max.z), Math.max(min.x, max.x), Math.max(min.y, max.y), Math.max(min.z, max.z)
-+            // Leaf start - Optimize AABB
-+            min.x < max.x ? min.x : max.x,
-+            min.y < max.y ? min.y : max.y,
-+            min.z < max.z ? min.z : max.z,
-+            min.x > max.x ? min.x : max.x,
-+            min.y > max.y ? min.y : max.y,
-+            min.z > max.z ? min.z : max.z
-+            // Leaf end - Optimize AABB
-         );
-     }
- 
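Note on the deleted AABB patch above: the raw ternary is not a universal drop-in for Math.max/Math.min on doubles; the two only agree for the finite, non-NaN coordinates an AABB holds, which is why the swap is safe there. A minimal standalone sketch (hypothetical class name, not part of the Leaf sources) of where they diverge:

    public final class TernaryMinMaxSketch {
        // Single compare, no NaN or signed-zero handling - the form the patch used
        static double maxTernary(double a, double b) {
            return a > b ? a : b;
        }

        public static void main(String[] args) {
            System.out.println(Math.max(0.0, -0.0));         // 0.0
            System.out.println(maxTernary(0.0, -0.0));       // -0.0 (signed zero differs)
            System.out.println(Math.max(Double.NaN, 1.0));   // NaN
            System.out.println(maxTernary(Double.NaN, 1.0)); // 1.0 (NaN compares false)
        }
    }
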
diff --git a/leaf-server/minecraft-patches/features/0129-Make-removeIf-slightly-faster.patch b/leaf-server/minecraft-patches/features/0128-Make-removeIf-slightly-faster.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0129-Make-removeIf-slightly-faster.patch
rename to leaf-server/minecraft-patches/features/0128-Make-removeIf-slightly-faster.patch
diff --git a/leaf-server/minecraft-patches/features/0130-Optimize-LinearPalette.patch b/leaf-server/minecraft-patches/features/0129-Optimize-LinearPalette.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0130-Optimize-LinearPalette.patch
rename to leaf-server/minecraft-patches/features/0129-Optimize-LinearPalette.patch
diff --git a/leaf-server/minecraft-patches/features/0131-Slightly-optimized-VarInt-write.patch b/leaf-server/minecraft-patches/features/0130-Slightly-optimized-VarInt-write.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0131-Slightly-optimized-VarInt-write.patch
rename to leaf-server/minecraft-patches/features/0130-Slightly-optimized-VarInt-write.patch
diff --git a/leaf-server/minecraft-patches/features/0132-Rewrite-ClientboundLightUpdatePacketData.patch b/leaf-server/minecraft-patches/features/0131-Rewrite-ClientboundLightUpdatePacketData.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0132-Rewrite-ClientboundLightUpdatePacketData.patch
rename to leaf-server/minecraft-patches/features/0131-Rewrite-ClientboundLightUpdatePacketData.patch
diff --git a/leaf-server/minecraft-patches/features/0136-Async-chunk-sending.patch b/leaf-server/minecraft-patches/features/0132-Async-chunk-sending.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0136-Async-chunk-sending.patch
rename to leaf-server/minecraft-patches/features/0132-Async-chunk-sending.patch
diff --git a/leaf-server/minecraft-patches/features/0133-Some-Optimizations-on-SerializableChunkData.patch b/leaf-server/minecraft-patches/features/0133-Some-Optimizations-on-SerializableChunkData.patch
deleted file mode 100644
index 126a1d8c..00000000
--- a/leaf-server/minecraft-patches/features/0133-Some-Optimizations-on-SerializableChunkData.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Taiyou06
-Date: Tue, 25 Feb 2025 21:13:54 +0100
-Subject: [PATCH] Some Optimizations on SerializableChunkData
-
-
-diff --git a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
-index 6b6aaeca14178b5b709e20ae13552d42217f15c0..c0939c311c554a4660b80725294663bab7915733 100644
---- a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
-+++ b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
-@@ -502,14 +502,16 @@ public record SerializableChunkData(
-             throw new IllegalArgumentException("Chunk can't be serialized: " + chunk);
-         } else {
-             ChunkPos pos = chunk.getPos();
--            List<SerializableChunkData.SectionData> list = new ArrayList<>(); final List<SerializableChunkData.SectionData> sectionsList = list; // Paper - starlight - OBFHELPER
--            LevelChunkSection[] sections = chunk.getSections();
--            LevelLightEngine lightEngine = level.getChunkSource().getLightEngine();
- 
-             // Paper start - starlight
-             final int minLightSection = ca.spottedleaf.moonrise.common.util.WorldUtil.getMinLightSection(level);
-             final int maxLightSection = ca.spottedleaf.moonrise.common.util.WorldUtil.getMaxLightSection(level);
-             final int minBlockSection = ca.spottedleaf.moonrise.common.util.WorldUtil.getMinSection(level);
-+            // Leaf start - Some Optimizations on SerializableChunkData
-+            // Pre-allocate with correct capacity to avoid resizing
-+            final int expectedSectionCount = maxLightSection - minLightSection + 1;
-+            List<SerializableChunkData.SectionData> list = new ArrayList<>(expectedSectionCount);
-+            // Leaf end - Some Optimizations on SerializableChunkData
- 
-             final LevelChunkSection[] chunkSections = chunk.getSections();
-             final ca.spottedleaf.moonrise.patches.starlight.light.SWMRNibbleArray[] blockNibbles = ((ca.spottedleaf.moonrise.patches.starlight.chunk.StarlightChunk)chunk).starlight$getBlockNibbles();
-@@ -541,10 +543,11 @@ public record SerializableChunkData(
-                     ((ca.spottedleaf.moonrise.patches.starlight.storage.StarlightSectionData)(Object)sectionData).starlight$setSkyLightState(skyNibble.state);
-                 }
- 
--                sectionsList.add(sectionData);
-+                list.add(sectionData); // Leaf - Some Optimizations on SerializableChunkData
-             }
-             // Paper end - starlight
- 
-+            // Pre-allocate block entities list with exact size needed
-             List<CompoundTag> list1 = new ArrayList<>(chunk.getBlockEntitiesPos().size());
- 
-             for (BlockPos blockPos : chunk.getBlockEntitiesPos()) {
-@@ -554,7 +557,16 @@ public record SerializableChunkData(
-                 }
-             }
- 
--            List<CompoundTag> list2 = new ArrayList<>();
-+            // Leaf start - Some Optimizations on SerializableChunkData
-+            // For entities, use an initial estimated capacity if it's a ProtoChunk
-+            int entityEstimate = 64; // Reasonable default size
-+            if (chunk.getPersistedStatus().getChunkType() == ChunkType.PROTOCHUNK) {
-+                ProtoChunk protoChunk = (ProtoChunk)chunk;
-+                entityEstimate = Math.max(16, protoChunk.getEntities().size());
-+            }
-+            List<CompoundTag> list2 = new ArrayList<>(entityEstimate);
-+            // Leaf end - Some Optimizations on SerializableChunkData
-+
-             long[] longs = null;
-             if (chunk.getPersistedStatus().getChunkType() == ChunkType.PROTOCHUNK) {
-                 ProtoChunk protoChunk = (ProtoChunk)chunk;
-@@ -570,14 +582,18 @@ public record SerializableChunkData(
-             for (Entry<Heightmap.Types, Heightmap> entry : chunk.getHeightmaps()) {
-                 if (chunk.getPersistedStatus().heightmapsAfter().contains(entry.getKey())) {
-                     long[] rawData = entry.getValue().getRawData();
--                    map.put(entry.getKey(), (long[])rawData.clone());
-+                    map.put(entry.getKey(), Arrays.copyOf(rawData, rawData.length)); // Leaf - Some Optimizations on SerializableChunkData
-                 }
-             }
- 
-             ChunkAccess.PackedTicks ticksForSerialization = chunk.getTicksForSerialization(level.getGameTime());
--            ShortList[] lists = Arrays.stream(chunk.getPostProcessing())
--                .map(list3 -> list3 != null ? new ShortArrayList(list3) : null)
--                .toArray(ShortList[]::new);
-+            // Leaf start - Some Optimizations on SerializableChunkData
-+            ShortList[] postProcessing = chunk.getPostProcessing();
-+            ShortList[] lists = new ShortList[postProcessing.length];
-+            for (int i = 0; i < postProcessing.length; i++) {
-+                lists[i] = postProcessing[i] != null ? new ShortArrayList(postProcessing[i]) : null;
-+            }
-+            // Leaf end - Some Optimizations on SerializableChunkData
-             CompoundTag compoundTag = packStructureData(
-                 StructurePieceSerializationContext.fromLevel(level), pos, chunk.getAllStarts(), chunk.getAllReferences()
-             );
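Note on the deleted SerializableChunkData patch above: its core trick is sizing each ArrayList to its known (or estimated) final element count so the backing array is allocated once instead of being grown and copied as elements arrive. The idea in isolation (hypothetical names, a sketch rather than the Leaf code):

    import java.util.ArrayList;
    import java.util.List;

    final class PresizeSketch {
        static List<long[]> copyAll(long[][] sections) {
            // Exact capacity up front: no intermediate grow-and-copy steps
            List<long[]> out = new ArrayList<>(sections.length);
            for (long[] section : sections) {
                out.add(section.clone());
            }
            return out;
        }
    }
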
diff --git a/leaf-server/minecraft-patches/features/0137-Spawner-Configurations.patch b/leaf-server/minecraft-patches/features/0133-Spawner-Configurations.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0137-Spawner-Configurations.patch
rename to leaf-server/minecraft-patches/features/0133-Spawner-Configurations.patch
diff --git a/leaf-server/minecraft-patches/features/0134-Rework-ChunkHolderManager.patch b/leaf-server/minecraft-patches/features/0134-Rework-ChunkHolderManager.patch
deleted file mode 100644
index a95feb8d..00000000
--- a/leaf-server/minecraft-patches/features/0134-Rework-ChunkHolderManager.patch
+++ /dev/null
@@ -1,134 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Taiyou06
-Date: Thu, 27 Feb 2025 23:39:32 +0100
-Subject: [PATCH] Rework ChunkHolderManager
-
-
-diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
-index be820c6093dd2ae7642b9bee11edf65e3a8d7242..d6a30d6735d24f24a8108b6a5d15725587bb662a 100644
---- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
-+++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
-@@ -736,24 +736,20 @@ public final class ChunkHolderManager {
- 
-         final int sectionShift = ((ChunkSystemServerLevel)this.world).moonrise$getRegionChunkShift();
- 
--        final Predicate<Ticket<?>> expireNow = (final Ticket<?> ticket) -> {
--            long removeDelay = ((ChunkSystemTicket<?>)(Object)ticket).moonrise$getRemoveDelay();
--            if (removeDelay == NO_TIMEOUT_MARKER) {
--                return false;
--            }
--            --removeDelay;
--            ((ChunkSystemTicket<?>)(Object)ticket).moonrise$setRemoveDelay(removeDelay);
--            return removeDelay <= 0L;
--        };
--
-+        // Leaf start - Rework ChunkHolderManager
-+        // Collect sections to process first to avoid concurrent modification issues
-+        List<Long> sectionKeys = new ArrayList<>();
-         for (final PrimitiveIterator.OfLong iterator = this.sectionToChunkToExpireCount.keyIterator(); iterator.hasNext();) {
--            final long sectionKey = iterator.nextLong();
-+            sectionKeys.add(iterator.nextLong());
-+        }
- 
-+        for (final Long sectionKey : sectionKeys) {
-+            // Skip if section was removed concurrently
-             if (!this.sectionToChunkToExpireCount.containsKey(sectionKey)) {
--                // removed concurrently
-                 continue;
-             }
- 
-+            // Acquire lock for this section only
-             final ReentrantAreaLock.Node ticketLock = this.ticketLockArea.lock(
-                 CoordinateUtils.getChunkX(sectionKey) << sectionShift,
-                 CoordinateUtils.getChunkZ(sectionKey) << sectionShift
-@@ -761,11 +757,15 @@ public final class ChunkHolderManager {
- 
-             try {
-                 final Long2IntOpenHashMap chunkToExpireCount = this.sectionToChunkToExpireCount.get(sectionKey);
--                if (chunkToExpireCount == null) {
--                    // lost to some race
-+                if (chunkToExpireCount == null || chunkToExpireCount.isEmpty()) {
-+                    // Section was removed or is empty, clean up
-+                    if (chunkToExpireCount != null && chunkToExpireCount.isEmpty()) {
-+                        this.sectionToChunkToExpireCount.remove(sectionKey);
-+                    }
-                     continue;
-                 }
- 
-+                // Process each chunk in this section
-                 for (final Iterator<Long2IntMap.Entry> iterator1 = chunkToExpireCount.long2IntEntrySet().fastIterator(); iterator1.hasNext();) {
-                     final Long2IntMap.Entry entry = iterator1.next();
- 
-@@ -773,33 +773,51 @@
-                     final long chunkKey = entry.getLongKey();
-                     final int expireCount = entry.getIntValue();
- 
-                     final SortedArraySet<Ticket<?>> tickets = this.tickets.get(chunkKey);
-+                    if (tickets == null || tickets.isEmpty()) {
-+                        iterator1.remove();
-+                        continue;
-+                    }
-+
-                     final int levelBefore = getTicketLevelAt(tickets);
-+                    int expiredCount = 0;
- 
--                    final int sizeBefore = tickets.size();
--                    tickets.removeIf(expireNow);
--                    final int sizeAfter = tickets.size();
--                    final int levelAfter = getTicketLevelAt(tickets);
-+                    // More efficient ticket processing - avoids creating a new predicate each time
-+                    for (Iterator<Ticket<?>> ticketIterator = tickets.iterator(); ticketIterator.hasNext();) {
-+                        Ticket<?> ticket = ticketIterator.next();
-+                        long removeDelay = ((ChunkSystemTicket<?>)(Object)ticket).moonrise$getRemoveDelay();
-+
-+                        if (removeDelay == NO_TIMEOUT_MARKER) {
-+                            continue;
-+                        }
-+
-+                        --removeDelay;
-+                        if (removeDelay <= 0) {
-+                            ticketIterator.remove();
-+                            expiredCount++;
-+                        } else {
-+                            ((ChunkSystemTicket<?>)(Object)ticket).moonrise$setRemoveDelay(removeDelay);
-+                        }
-+                    }
- 
-                     if (tickets.isEmpty()) {
-                         this.tickets.remove(chunkKey);
-                     }
-+
-+                    final int levelAfter = getTicketLevelAt(tickets);
-                     if (levelBefore != levelAfter) {
-                         this.updateTicketLevel(chunkKey, levelAfter);
-                     }
- 
--                    final int newExpireCount = expireCount - (sizeBefore - sizeAfter);
--
--                    if (newExpireCount == expireCount) {
--                        continue;
--                    }
--
--                    if (newExpireCount != 0) {
--                        entry.setValue(newExpireCount);
--                    } else {
-+                    // Update expire count
-+                    final int newExpireCount = expireCount - expiredCount;
-+                    if (newExpireCount <= 0) {
-                         iterator1.remove();
-+                    } else if (newExpireCount != expireCount) {
-+                        entry.setValue(newExpireCount);
-                     }
-                 }
- 
-+                // Remove empty sections
-                 if (chunkToExpireCount.isEmpty()) {
-                     this.sectionToChunkToExpireCount.remove(sectionKey);
-                 }
-@@ -807,6 +825,7 @@
-                 this.ticketLockArea.unlock(ticketLock);
-             }
-         }
-+        // Leaf end - Rework ChunkHolderManager
- 
-         this.processTicketUpdates();
-     }
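Note on the deleted rework above: it replaces removeIf(expireNow) plus a sizeBefore - sizeAfter diff with one explicit iterator pass that removes and counts in the same sweep. The pattern in isolation (hypothetical Ticket type, not the Moonrise classes):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    final class CountingExpirySketch {
        record Ticket(long removeDelay) {}

        static int expire(List<Ticket> tickets) {
            int expired = 0;
            for (Iterator<Ticket> it = tickets.iterator(); it.hasNext();) {
                if (it.next().removeDelay() <= 0) {
                    it.remove();
                    expired++; // counted during the same pass, no second size() diff
                }
            }
            return expired;
        }

        public static void main(String[] args) {
            List<Ticket> tickets = new ArrayList<>(List.of(new Ticket(0), new Ticket(5)));
            System.out.println(expire(tickets)); // 1
        }
    }
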
diff --git a/leaf-server/minecraft-patches/features/0138-SparklyPaper-Parallel-world-ticking.patch b/leaf-server/minecraft-patches/features/0134-SparklyPaper-Parallel-world-ticking.patch
similarity index 99%
rename from leaf-server/minecraft-patches/features/0138-SparklyPaper-Parallel-world-ticking.patch
rename to leaf-server/minecraft-patches/features/0134-SparklyPaper-Parallel-world-ticking.patch
index b8cc4514..25ad7ee0 100644
--- a/leaf-server/minecraft-patches/features/0138-SparklyPaper-Parallel-world-ticking.patch
+++ b/leaf-server/minecraft-patches/features/0134-SparklyPaper-Parallel-world-ticking.patch
@@ -6,10 +6,10 @@ Subject: [PATCH] SparklyPaper: Parallel world ticking
 Original project: https://github.com/SparklyPower/SparklyPaper
 
 diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
-index d6a30d6735d24f24a8108b6a5d15725587bb662a..39517966935265bc4533d4ce414d2df72df5a614 100644
+index be820c6093dd2ae7642b9bee11edf65e3a8d7242..06ac3537f5655d048d770bb004243f207fad9faa 100644
 --- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
 +++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
-@@ -1050,7 +1050,7 @@ public final class ChunkHolderManager {
+@@ -1031,7 +1031,7 @@ public final class ChunkHolderManager {
      if (changedFullStatus.isEmpty()) {
          return;
      }
@@ -18,7 +18,7 @@ index d6a30d6735d24f24a8108b6a5d15725587bb662a..39517966935265bc4533d4ce414d2df7
      this.taskScheduler.scheduleChunkTask(() -> {
          final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = ChunkHolderManager.this.pendingFullLoadUpdate;
          for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
-@@ -1076,7 +1076,12 @@ public final class ChunkHolderManager {
+@@ -1057,7 +1057,12 @@ public final class ChunkHolderManager {
      // note: never call while inside the chunk system, this will absolutely break everything
      public void processUnloads() {
@@ -32,7 +32,7 @@ index d6a30d6735d24f24a8108b6a5d15725587bb662a..39517966935265bc4533d4ce414d2df7
      if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
          throw new IllegalStateException("Cannot unload chunks recursively");
-@@ -1358,7 +1363,7 @@ public final class ChunkHolderManager {
+@@ -1339,7 +1344,7 @@ public final class ChunkHolderManager {
 
      List<NewChunkHolder> changedFullStatus = null;
diff --git a/leaf-server/minecraft-patches/features/0139-SparklyPaper-Track-each-world-MSPT.patch b/leaf-server/minecraft-patches/features/0135-SparklyPaper-Track-each-world-MSPT.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0139-SparklyPaper-Track-each-world-MSPT.patch
rename to leaf-server/minecraft-patches/features/0135-SparklyPaper-Track-each-world-MSPT.patch
diff --git a/leaf-server/minecraft-patches/features/0140-PaperPR-Fix-cancelled-Projectile-Events-still-consum.patch b/leaf-server/minecraft-patches/features/0136-PaperPR-Fix-cancelled-Projectile-Events-still-consum.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0140-PaperPR-Fix-cancelled-Projectile-Events-still-consum.patch
rename to leaf-server/minecraft-patches/features/0136-PaperPR-Fix-cancelled-Projectile-Events-still-consum.patch
diff --git a/leaf-server/minecraft-patches/features/0141-Optimize-SetLookAndInteract-and-NearestVisibleLiving.patch b/leaf-server/minecraft-patches/features/0137-Optimize-SetLookAndInteract-and-NearestVisibleLiving.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0141-Optimize-SetLookAndInteract-and-NearestVisibleLiving.patch
rename to leaf-server/minecraft-patches/features/0137-Optimize-SetLookAndInteract-and-NearestVisibleLiving.patch
diff --git a/leaf-server/minecraft-patches/features/0142-Remove-streams-on-InsideBrownianWalk.patch b/leaf-server/minecraft-patches/features/0138-Remove-streams-on-InsideBrownianWalk.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0142-Remove-streams-on-InsideBrownianWalk.patch
rename to leaf-server/minecraft-patches/features/0138-Remove-streams-on-InsideBrownianWalk.patch
diff --git a/leaf-server/minecraft-patches/features/0143-Use-BFS-on-getSlopeDistance.patch b/leaf-server/minecraft-patches/features/0139-Use-BFS-on-getSlopeDistance.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0143-Use-BFS-on-getSlopeDistance.patch
rename to leaf-server/minecraft-patches/features/0139-Use-BFS-on-getSlopeDistance.patch
diff --git a/leaf-server/minecraft-patches/features/0144-Paper-PR-Throttle-failed-spawn-attempts.patch b/leaf-server/minecraft-patches/features/0140-Paper-PR-Throttle-failed-spawn-attempts.patch
similarity index 98%
rename from leaf-server/minecraft-patches/features/0144-Paper-PR-Throttle-failed-spawn-attempts.patch
rename to leaf-server/minecraft-patches/features/0140-Paper-PR-Throttle-failed-spawn-attempts.patch
index 94e2d7b8..169e432c 100644
--- a/leaf-server/minecraft-patches/features/0144-Paper-PR-Throttle-failed-spawn-attempts.patch
+++ b/leaf-server/minecraft-patches/features/0140-Paper-PR-Throttle-failed-spawn-attempts.patch
@@ -174,7 +174,7 @@ index 3a6db5bc0c8be7d68e15317a621c1965fdc3a9bd..50a9903367f49ece2a267d10944b1515
      // Paper start - rewrite chunk system
      private volatile ca.spottedleaf.moonrise.patches.starlight.light.SWMRNibbleArray[] blockNibbles;
  diff --git a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
-index c0939c311c554a4660b80725294663bab7915733..e2df93b2500a74c4cecac1515f3991967a07a052 100644
+index 6b6aaeca14178b5b709e20ae13552d42217f15c0..e9ece9b618b0a9eb82b9f07a09ee6cb60cf7ec16 100644
 --- a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
 +++ b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java
 @@ -92,6 +92,7 @@ public record SerializableChunkData(
-@@ -603,6 +627,7 @@ public record SerializableChunkData(
+@@ -587,6 +611,7 @@ public record SerializableChunkData(
      persistentDataContainer = chunk.persistentDataContainer.toTagCompound();
  }
  // CraftBukkit end
-@@ -623,6 +648,7 @@ public record SerializableChunkData(
+@@ -607,6 +632,7 @@ public record SerializableChunkData(
      list1,
      compoundTag
      , persistentDataContainer // CraftBukkit - persistentDataContainer
  );
  }
  }
-@@ -719,6 +745,21 @@ public record SerializableChunkData(
+@@ -703,6 +729,21 @@ public record SerializableChunkData(
      compoundTag.put("ChunkBukkitValues", this.persistentDataContainer);
  }
  // CraftBukkit end
  // Paper start - starlight
  if (this.lightCorrect && !this.chunkStatus.isBefore(net.minecraft.world.level.chunk.status.ChunkStatus.LIGHT)) {
      // clobber vanilla value to force vanilla to relight
-@@ -947,4 +988,50 @@ public record SerializableChunkData(
+@@ -931,4 +972,50 @@ public record SerializableChunkData(
  }
  // Paper end - starlight - convert from record
 }
diff --git a/leaf-server/minecraft-patches/features/0145-Improve-BlockEntity-ticking-isRemoved-check.patch b/leaf-server/minecraft-patches/features/0141-Improve-BlockEntity-ticking-isRemoved-check.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0145-Improve-BlockEntity-ticking-isRemoved-check.patch
rename to leaf-server/minecraft-patches/features/0141-Improve-BlockEntity-ticking-isRemoved-check.patch
diff --git a/leaf-server/minecraft-patches/features/0146-Raytrace-AntiXray-SDK-integration.patch b/leaf-server/minecraft-patches/features/0142-Raytrace-AntiXray-SDK-integration.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0146-Raytrace-AntiXray-SDK-integration.patch
rename to leaf-server/minecraft-patches/features/0142-Raytrace-AntiXray-SDK-integration.patch
diff --git a/leaf-server/minecraft-patches/features/0147-Optimize-addOrUpdateTransientModifier.patch b/leaf-server/minecraft-patches/features/0143-Optimize-addOrUpdateTransientModifier.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0147-Optimize-addOrUpdateTransientModifier.patch
rename to leaf-server/minecraft-patches/features/0143-Optimize-addOrUpdateTransientModifier.patch
diff --git a/leaf-server/minecraft-patches/features/0148-Optimize-ContextMap.create.patch b/leaf-server/minecraft-patches/features/0144-Optimize-ContextMap.create.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0148-Optimize-ContextMap.create.patch
rename to leaf-server/minecraft-patches/features/0144-Optimize-ContextMap.create.patch
diff --git a/leaf-server/minecraft-patches/features/0149-Micro-optimizations-for-random-tick.patch b/leaf-server/minecraft-patches/features/0145-Micro-optimizations-for-random-tick.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0149-Micro-optimizations-for-random-tick.patch
rename to leaf-server/minecraft-patches/features/0145-Micro-optimizations-for-random-tick.patch
diff --git a/leaf-server/minecraft-patches/features/0150-Remove-streams-on-updateConnectedPlayersWithinRange.patch b/leaf-server/minecraft-patches/features/0146-Remove-streams-on-updateConnectedPlayersWithinRange.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0150-Remove-streams-on-updateConnectedPlayersWithinRange.patch
rename to leaf-server/minecraft-patches/features/0146-Remove-streams-on-updateConnectedPlayersWithinRange.patch
diff --git a/leaf-server/minecraft-patches/features/0151-Remove-streams-on-PlayerDetector.patch b/leaf-server/minecraft-patches/features/0147-Remove-streams-on-PlayerDetector.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0151-Remove-streams-on-PlayerDetector.patch
rename to leaf-server/minecraft-patches/features/0147-Remove-streams-on-PlayerDetector.patch
diff --git a/leaf-server/minecraft-patches/features/0152-Async-Block-Finding.patch b/leaf-server/minecraft-patches/features/0148-Async-Block-Finding.patch
similarity index 96%
rename from leaf-server/minecraft-patches/features/0152-Async-Block-Finding.patch
rename to leaf-server/minecraft-patches/features/0148-Async-Block-Finding.patch
index ca8e3fac..c527e135 100644
--- a/leaf-server/minecraft-patches/features/0152-Async-Block-Finding.patch
+++ b/leaf-server/minecraft-patches/features/0148-Async-Block-Finding.patch
@@ -5,7 +5,7 @@ Subject: [PATCH] Async Block Finding
 
 diff --git a/net/minecraft/world/entity/ai/goal/MoveToBlockGoal.java b/net/minecraft/world/entity/ai/goal/MoveToBlockGoal.java
-index 3f080b15543bf8c5fa0774b62d7f12e13b82511a..007da9cb39ff76285c52ce0abdff60997acdff0f 100644
+index 3f080b15543bf8c5fa0774b62d7f12e13b82511a..d70ed3ace6fa8f97bcc0d493842f44f43072a610 100644
 --- a/net/minecraft/world/entity/ai/goal/MoveToBlockGoal.java
 +++ b/net/minecraft/world/entity/ai/goal/MoveToBlockGoal.java
 @@ -20,6 +20,18 @@ public abstract class MoveToBlockGoal extends Goal {
@@ -102,7 +102,7 @@ index 3f080b15543bf8c5fa0774b62d7f12e13b82511a..007da9cb39ff76285c52ce0abdff6099
 +            this.isValidTarget(this.mob.level(), pos)) {
 +
 +            this.blockPos = pos;
-+            this.mob.movingTarget = pos == BlockPos.ZERO ? null : pos;
++            this.mob.movingTarget = this.blockPos == BlockPos.ZERO ? null : this.blockPos; // Use the assigned blockPos
 +            return true;
 +        }
 +    }
@@ -177,7 +177,7 @@ index 3f080b15543bf8c5fa0774b62d7f12e13b82511a..007da9cb39ff76285c52ce0abdff6099
 -            this.blockPos = mutableBlockPos;
 -            this.mob.movingTarget = mutableBlockPos == BlockPos.ZERO ? null : mutableBlockPos.immutable(); // Paper
 +            this.blockPos = mutableBlockPos.immutable(); // Leaf - Async Block Finding
-+            this.mob.movingTarget = this.blockPos == BlockPos.ZERO ? null : this.blockPos; // Paper // Leaf - Async Block Finding
++            this.mob.movingTarget = this.blockPos == BlockPos.ZERO ? null : this.blockPos; // Paper // Leaf - Async Block Finding - Use the assigned blockPos
              return true;
          }
      }
diff --git a/leaf-server/minecraft-patches/features/0153-Use-direct-iteration-on-Sensing.tick.patch b/leaf-server/minecraft-patches/features/0149-Use-direct-iteration-on-Sensing.tick.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0153-Use-direct-iteration-on-Sensing.tick.patch
rename to leaf-server/minecraft-patches/features/0149-Use-direct-iteration-on-Sensing.tick.patch
diff --git a/leaf-server/minecraft-patches/features/0154-Optimise-non-flush-packet-sending.patch b/leaf-server/minecraft-patches/features/0150-Optimise-non-flush-packet-sending.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0154-Optimise-non-flush-packet-sending.patch
rename to leaf-server/minecraft-patches/features/0150-Optimise-non-flush-packet-sending.patch
diff --git a/leaf-server/minecraft-patches/features/0155-Prevent-double-chunk-retrieving-in-entity-fluid-push.patch b/leaf-server/minecraft-patches/features/0151-Prevent-double-chunk-retrieving-in-entity-fluid-push.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0155-Prevent-double-chunk-retrieving-in-entity-fluid-push.patch
rename to leaf-server/minecraft-patches/features/0151-Prevent-double-chunk-retrieving-in-entity-fluid-push.patch
diff --git a/leaf-server/minecraft-patches/features/0157-Null-handling-on-MultifaceSpreader.patch b/leaf-server/minecraft-patches/features/0152-Null-handling-on-MultifaceSpreader.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0157-Null-handling-on-MultifaceSpreader.patch
rename to leaf-server/minecraft-patches/features/0152-Null-handling-on-MultifaceSpreader.patch
diff --git a/leaf-server/minecraft-patches/features/0158-More-virtual-threads.patch b/leaf-server/minecraft-patches/features/0153-More-virtual-threads.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0158-More-virtual-threads.patch
rename to leaf-server/minecraft-patches/features/0153-More-virtual-threads.patch
diff --git a/leaf-server/minecraft-patches/features/0154-Replace-ConcurrentLong2ReferenceChainedHashTable-wit.patch b/leaf-server/minecraft-patches/features/0154-Replace-ConcurrentLong2ReferenceChainedHashTable-wit.patch
new file mode 100644
index 00000000..11c5606a
--- /dev/null
+++ b/leaf-server/minecraft-patches/features/0154-Replace-ConcurrentLong2ReferenceChainedHashTable-wit.patch
@@ -0,0 +1,148 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Taiyou06
+Date: Sun, 13 Apr 2025 16:15:17 +0200
+Subject: [PATCH] Replace ConcurrentLong2ReferenceChainedHashTable with custom
+ map
+
+
+diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/queue/ChunkUnloadQueue.java b/ca/spottedleaf/moonrise/patches/chunk_system/queue/ChunkUnloadQueue.java
+index 7eafc5b7cba23d8dec92ecc1050afe3fd8c9e309..c2d5e83f0bdf98d3c07d6da2bba3b1ebaf7307d5 100644
+--- a/ca/spottedleaf/moonrise/patches/chunk_system/queue/ChunkUnloadQueue.java
++++ b/ca/spottedleaf/moonrise/patches/chunk_system/queue/ChunkUnloadQueue.java
+@@ -7,6 +7,8 @@ import com.google.gson.JsonElement;
+ import com.google.gson.JsonObject;
+ import it.unimi.dsi.fastutil.longs.LongIterator;
+ import it.unimi.dsi.fastutil.longs.LongLinkedOpenHashSet;
++import org.dreeam.leaf.util.map.spottedleaf.LeafConcurrentLong2ReferenceChainedHashTable;
++
+ import java.util.ArrayList;
+ import java.util.Iterator;
+ import java.util.List;
+@@ -16,7 +18,7 @@ public final class ChunkUnloadQueue {
+ 
+     public final int coordinateShift;
+     private final AtomicLong orderGenerator = new AtomicLong();
+-    private final ConcurrentLong2ReferenceChainedHashTable<UnloadSection> unloadSections = new ConcurrentLong2ReferenceChainedHashTable<>();
++    private final LeafConcurrentLong2ReferenceChainedHashTable<UnloadSection> unloadSections = new LeafConcurrentLong2ReferenceChainedHashTable<>();
+ 
+     /*
+      * Note: write operations do not occur in parallel for any given section.
+@@ -32,8 +34,8 @@ public final class ChunkUnloadQueue {
+     public List<SectionToUnload> retrieveForAllRegions() {
+         final List<SectionToUnload> ret = new ArrayList<>();
+ 
+-        for (final Iterator<ConcurrentLong2ReferenceChainedHashTable.TableEntry<UnloadSection>> iterator = this.unloadSections.entryIterator(); iterator.hasNext();) {
+-            final ConcurrentLong2ReferenceChainedHashTable.TableEntry<UnloadSection> entry = iterator.next();
++        for (final Iterator<LeafConcurrentLong2ReferenceChainedHashTable.TableEntry<UnloadSection>> iterator = this.unloadSections.entryIterator(); iterator.hasNext();) {
++            final LeafConcurrentLong2ReferenceChainedHashTable.TableEntry<UnloadSection> entry = iterator.next();
+             final long key = entry.getKey();
+             final UnloadSection section = entry.getValue();
+             final int sectionX = CoordinateUtils.getChunkX(key);
+@@ -141,4 +143,4 @@ public final class ChunkUnloadQueue {
+             this.order = order;
+         }
+     }
+-}
+\ No newline at end of file
++}
+diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
+index 06ac3537f5655d048d770bb004243f207fad9faa..a1f328a5c4ccc030c99762a68008ab1ecebdc06e 100644
+--- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
++++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
+@@ -40,6 +40,7 @@ import net.minecraft.util.SortedArraySet;
+ import net.minecraft.util.Unit;
+ import net.minecraft.world.level.ChunkPos;
+ import net.minecraft.world.level.chunk.LevelChunk;
++import org.dreeam.leaf.util.map.spottedleaf.LeafConcurrentLong2ReferenceChainedHashTable;
+ import org.slf4j.Logger;
+ import java.io.IOException;
+ import java.text.DecimalFormat;
+@@ -71,11 +72,11 @@ public final class ChunkHolderManager {
+     private static final long PROBE_MARKER = Long.MIN_VALUE + 1;
+     public final ReentrantAreaLock ticketLockArea;
+ 
+-    private final ConcurrentLong2ReferenceChainedHashTable<SortedArraySet<Ticket<?>>> tickets = new ConcurrentLong2ReferenceChainedHashTable<>();
+-    private final ConcurrentLong2ReferenceChainedHashTable<Long2IntOpenHashMap> sectionToChunkToExpireCount = new ConcurrentLong2ReferenceChainedHashTable<>();
++    private final LeafConcurrentLong2ReferenceChainedHashTable<SortedArraySet<Ticket<?>>> tickets = new LeafConcurrentLong2ReferenceChainedHashTable<>();
++    private final LeafConcurrentLong2ReferenceChainedHashTable<Long2IntOpenHashMap> sectionToChunkToExpireCount = new LeafConcurrentLong2ReferenceChainedHashTable<>();
+     final ChunkUnloadQueue unloadQueue;
+ 
+-    private final ConcurrentLong2ReferenceChainedHashTable<NewChunkHolder> chunkHolders = ConcurrentLong2ReferenceChainedHashTable.createWithCapacity(16384, 0.25f);
++    private final LeafConcurrentLong2ReferenceChainedHashTable<NewChunkHolder> chunkHolders = LeafConcurrentLong2ReferenceChainedHashTable.createWithCapacity(16384, 0.25f);
+     private final ServerLevel world;
+     private final ChunkTaskScheduler taskScheduler;
+     private long currentTick;
+@@ -1422,9 +1423,9 @@ public final class ChunkHolderManager {
+         final JsonArray allTicketsJson = new JsonArray();
+         ret.add("tickets", allTicketsJson);
+ 
+-        for (final Iterator<ConcurrentLong2ReferenceChainedHashTable.TableEntry<SortedArraySet<Ticket<?>>>> iterator = this.tickets.entryIterator();
++        for (final Iterator<LeafConcurrentLong2ReferenceChainedHashTable.TableEntry<SortedArraySet<Ticket<?>>>> iterator = this.tickets.entryIterator();
+              iterator.hasNext();) {
+-            final ConcurrentLong2ReferenceChainedHashTable.TableEntry<SortedArraySet<Ticket<?>>> coordinateTickets = iterator.next();
++            final LeafConcurrentLong2ReferenceChainedHashTable.TableEntry<SortedArraySet<Ticket<?>>> coordinateTickets = iterator.next();
+             final long coordinate = coordinateTickets.getKey();
+             final SortedArraySet<Ticket<?>> tickets = coordinateTickets.getValue();
+ 
+diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java
+index 310a8f80debadd64c2d962ebf83b7d0505ce6e42..b69d256e2f6bab3c1b90c5f8c42caa3d80cd67a4 100644
+--- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java
++++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java
+@@ -10,6 +10,8 @@ import it.unimi.dsi.fastutil.longs.Long2ByteLinkedOpenHashMap;
+ import it.unimi.dsi.fastutil.shorts.Short2ByteLinkedOpenHashMap;
+ import it.unimi.dsi.fastutil.shorts.Short2ByteMap;
+ import it.unimi.dsi.fastutil.shorts.ShortOpenHashSet;
++import org.dreeam.leaf.util.map.spottedleaf.LeafConcurrentLong2ReferenceChainedHashTable;
++
+ import java.lang.invoke.VarHandle;
+ import java.util.ArrayDeque;
+ import java.util.ArrayList;
+@@ -35,11 +37,11 @@ public abstract class ThreadedTicketLevelPropagator {
+     }
+ 
+     private final UpdateQueue updateQueue;
+-    private final ConcurrentLong2ReferenceChainedHashTable<Section> sections;
++    private final LeafConcurrentLong2ReferenceChainedHashTable<Section> sections;
+ 
+     public ThreadedTicketLevelPropagator() {
+         this.updateQueue = new UpdateQueue();
+-        this.sections = new ConcurrentLong2ReferenceChainedHashTable<>();
++        this.sections = new LeafConcurrentLong2ReferenceChainedHashTable<>();
+     }
+ 
+     // must hold ticket lock for:
+diff --git a/ca/spottedleaf/moonrise/patches/starlight/light/StarLightInterface.java b/ca/spottedleaf/moonrise/patches/starlight/light/StarLightInterface.java
+index 1487b7d8be435b3fbad2aabd05796965b4775a87..54c425ba84c7c70becdfbde08812afdde777f5a8 100644
+--- a/ca/spottedleaf/moonrise/patches/starlight/light/StarLightInterface.java
++++ b/ca/spottedleaf/moonrise/patches/starlight/light/StarLightInterface.java
+@@ -27,6 +27,8 @@ import net.minecraft.world.level.chunk.LightChunkGetter;
+ import net.minecraft.world.level.chunk.status.ChunkStatus;
+ import net.minecraft.world.level.lighting.LayerLightEventListener;
+ import net.minecraft.world.level.lighting.LevelLightEngine;
++import org.dreeam.leaf.util.map.spottedleaf.LeafConcurrentLong2ReferenceChainedHashTable;
++
+ import java.util.ArrayDeque;
+ import java.util.ArrayList;
+ import java.util.HashSet;
+@@ -740,7 +742,7 @@ public final class StarLightInterface {
+ 
+     public static final class ServerLightQueue extends LightQueue {
+ 
+-        private final ConcurrentLong2ReferenceChainedHashTable<ServerChunkTasks> chunkTasks = new ConcurrentLong2ReferenceChainedHashTable<>();
++        private final LeafConcurrentLong2ReferenceChainedHashTable<ServerChunkTasks> chunkTasks = new LeafConcurrentLong2ReferenceChainedHashTable<>();
+ 
+         public ServerLightQueue(final StarLightInterface lightInterface) {
+             super(lightInterface);
+diff --git a/net/minecraft/server/level/ServerChunkCache.java b/net/minecraft/server/level/ServerChunkCache.java
+index b1f1b596a597d559aa672a3cb46a03917ad746af..d75f85208da0c7424fc95ae0d8ebb0a725dda0a7 100644
+--- a/net/minecraft/server/level/ServerChunkCache.java
++++ b/net/minecraft/server/level/ServerChunkCache.java
+@@ -72,7 +72,7 @@ public class ServerChunkCache extends ChunkSource implements ca.spottedleaf.moon
+     @VisibleForDebug
+     private NaturalSpawner.SpawnState lastSpawnState;
+     // Paper start
+-    private final ca.spottedleaf.concurrentutil.map.ConcurrentLong2ReferenceChainedHashTable<LevelChunk> fullChunks = new ca.spottedleaf.concurrentutil.map.ConcurrentLong2ReferenceChainedHashTable<>();
++    private final org.dreeam.leaf.util.map.spottedleaf.LeafConcurrentLong2ReferenceChainedHashTable<LevelChunk> fullChunks = new org.dreeam.leaf.util.map.spottedleaf.LeafConcurrentLong2ReferenceChainedHashTable<>();
+     public int getFullChunksCount() {
+         return this.fullChunks.size();
+     }
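Note on the new 0154 patch above: the diff shows only the call surface the swapped-in Leaf map must provide (get/put/remove/containsKey/size, keyIterator, entryIterator, createWithCapacity), not its internals. A trivial stand-in with that surface, backed by a boxing ConcurrentHashMap purely for illustration; the real class is presumably a primitive-keyed concurrent table:

    import java.util.Iterator;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class LongRefMapSketch<V> {
        private final ConcurrentHashMap<Long, V> backing = new ConcurrentHashMap<>();

        static <V> LongRefMapSketch<V> createWithCapacity(int capacity, float loadFactor) {
            return new LongRefMapSketch<>(); // a real table would pre-size its buckets here
        }

        V get(long key) { return this.backing.get(key); }
        V put(long key, V value) { return this.backing.put(key, value); }
        V remove(long key) { return this.backing.remove(key); }
        boolean containsKey(long key) { return this.backing.containsKey(key); }
        int size() { return this.backing.size(); }

        // The real classes expose primitive iterators (e.g. PrimitiveIterator.OfLong)
        Iterator<Long> keyIterator() { return this.backing.keySet().iterator(); }
        Iterator<Map.Entry<Long, V>> entryIterator() { return this.backing.entrySet().iterator(); }
    }
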
diff --git a/leaf-server/minecraft-patches/features/0156-Async-target-finding.patch b/leaf-server/minecraft-patches/features/0155-Async-target-finding.patch
similarity index 100%
rename from leaf-server/minecraft-patches/features/0156-Async-target-finding.patch
rename to leaf-server/minecraft-patches/features/0155-Async-target-finding.patch
diff --git a/leaf-server/minecraft-patches/features/0156-Reworked-ChunkHolderManager.patch b/leaf-server/minecraft-patches/features/0156-Reworked-ChunkHolderManager.patch
new file mode 100644
index 00000000..a2e6aac8
--- /dev/null
+++ b/leaf-server/minecraft-patches/features/0156-Reworked-ChunkHolderManager.patch
@@ -0,0 +1,151 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Taiyou06
+Date: Mon, 14 Apr 2025 03:02:42 +0200
+Subject: [PATCH] Reworked ChunkHolderManager
+
+
+diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
+index a1f328a5c4ccc030c99762a68008ab1ecebdc06e..3de8d0fb485e55f3fc38a65c251f109335595468 100644
+--- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
++++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ChunkHolderManager.java
+@@ -349,12 +349,13 @@ public final class ChunkHolderManager {
+         @Override
+         protected void processLevelUpdates(final Long2ByteLinkedOpenHashMap updates) {
+             // first the necessary chunkholders must be created, so just update the ticket levels
++            final LeafConcurrentLong2ReferenceChainedHashTable<NewChunkHolder> holderMap = ChunkHolderManager.this.chunkHolders;
+             for (final Iterator<Long2ByteMap.Entry> iterator = updates.long2ByteEntrySet().fastIterator(); iterator.hasNext();) {
+                 final Long2ByteMap.Entry entry = iterator.next();
+                 final long key = entry.getLongKey();
+                 final int newLevel = convertBetweenTicketLevels((int)entry.getByteValue());
+ 
+-                NewChunkHolder current = ChunkHolderManager.this.chunkHolders.get(key);
++                NewChunkHolder current = holderMap.get(key);
+                 if (current == null && newLevel > MAX_TICKET_LEVEL) {
+                     // not loaded and it shouldn't be loaded!
+                     iterator.remove();
+@@ -371,7 +372,7 @@ public final class ChunkHolderManager {
+                 if (current == null) {
+                     // must create
+                     current = ChunkHolderManager.this.createChunkHolder(key);
+-                    ChunkHolderManager.this.chunkHolders.put(key, current);
++                    holderMap.put(key, current);
+                     current.updateTicketLevel(newLevel);
+                 } else {
+                     current.updateTicketLevel(newLevel);
+@@ -737,20 +738,23 @@ public final class ChunkHolderManager {
+ 
+         final int sectionShift = ((ChunkSystemServerLevel)this.world).moonrise$getRegionChunkShift();
+ 
++
+         final Predicate<Ticket<?>> expireNow = (final Ticket<?> ticket) -> {
+             long removeDelay = ((ChunkSystemTicket<?>)(Object)ticket).moonrise$getRemoveDelay();
+             if (removeDelay == NO_TIMEOUT_MARKER) {
+                 return false;
+             }
+             --removeDelay;
+-            ((ChunkSystemTicket<?>)(Object)ticket).moonrise$setRemoveDelay(removeDelay);
+-            return removeDelay <= 0L;
++            final long nextDelay = removeDelay - 1;
++            ((ChunkSystemTicket<?>)(Object)ticket).moonrise$setRemoveDelay(nextDelay);
++            return nextDelay <= 0L;
+         };
+ 
+         for (final PrimitiveIterator.OfLong iterator = this.sectionToChunkToExpireCount.keyIterator(); iterator.hasNext();) {
+             final long sectionKey = iterator.nextLong();
+ 
+             if (!this.sectionToChunkToExpireCount.containsKey(sectionKey)) {
++
+                 // removed concurrently
+                 continue;
+             }
+@@ -773,37 +777,62 @@ public final class ChunkHolderManager {
+                     final long chunkKey = entry.getLongKey();
+                     final int expireCount = entry.getIntValue();
+ 
++
+                     final SortedArraySet<Ticket<?>> tickets = this.tickets.get(chunkKey);
+-                    final int levelBefore = getTicketLevelAt(tickets);
++                    if (tickets == null) {
++                        iterator1.remove();
++                        continue;
++                    }
+ 
++                    final int levelBefore;
++                    final Ticket<?> firstBefore;
+                     final int sizeBefore = tickets.size();
+-                    tickets.removeIf(expireNow);
+-                    final int sizeAfter = tickets.size();
+-                    final int levelAfter = getTicketLevelAt(tickets);
+ 
++                    if (!tickets.isEmpty()) {
++                        firstBefore = tickets.first();
++                        levelBefore = firstBefore.getTicketLevel();
++                    } else {
++                        firstBefore = null;
++                        levelBefore = MAX_TICKET_LEVEL + 1;
+                     }
+ 
++                    final boolean changed = tickets.removeIf(expireNow);
+ 
+-                    if (tickets.isEmpty()) {
+-                        this.tickets.remove(chunkKey);
+-                    }
+-                    if (levelBefore != levelAfter) {
+-                        this.updateTicketLevel(chunkKey, levelAfter);
++                    if (changed) {
++                        final int sizeAfter = tickets.size();
++                        final int levelAfter;
++                        boolean levelMightHaveChanged = true;
+ 
+-                    final int newExpireCount = expireCount - (sizeBefore - sizeAfter);
+-
+-                    if (newExpireCount == expireCount) {
+-                        continue;
+-                    }
+-
+-                    if (newExpireCount != 0) {
+-                        entry.setValue(newExpireCount);
+-                    } else {
++                        if (tickets.isEmpty()) {
++                            levelAfter = MAX_TICKET_LEVEL + 1;
++                            this.tickets.remove(chunkKey);
++                        } else {
++                            final Ticket<?> firstAfter = tickets.first();
++                            if (firstBefore == firstAfter) {
++                                levelMightHaveChanged = false;
++                                levelAfter = levelBefore;
++                            } else {
++                                levelAfter = firstAfter.getTicketLevel();
++                            }
++                        }
++
++                        if (levelMightHaveChanged && levelBefore != levelAfter) {
++                            this.updateTicketLevel(chunkKey, levelAfter);
++                        }
++
++                        final int removedCount = sizeBefore - sizeAfter;
++                        if (removedCount > 0) {
++                            final int newExpireCount = expireCount - removedCount;
++                            if (newExpireCount > 0) {
++                                entry.setValue(newExpireCount);
++                            } else {
++                                iterator1.remove();
++                            }
++                        }
++                    }
+                 }
+ 
+-                if (chunkToExpireCount.isEmpty()) {
+-                    this.sectionToChunkToExpireCount.remove(sectionKey);
+                 }
++                if (chunkToExpireCount.isEmpty()) { this.sectionToChunkToExpireCount.remove(sectionKey); }
+             } finally {
+                 this.ticketLockArea.unlock(ticketLock);
+             }
+@@ -812,6 +841,7 @@
+         this.processTicketUpdates();
+     }
+ 
++
+     public NewChunkHolder getChunkHolder(final int chunkX, final int chunkZ) {
+         return this.chunkHolders.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+     }
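Note on the 0156 patch above: rather than recomputing the ticket level (the head of a sorted set) before and after expiry, it snapshots the first element and only recomputes when removeIf reported a change and the head differs. The idea in isolation (a sketch using equals where the patch compares identity):

    import java.util.List;
    import java.util.TreeSet;

    final class HeadCompareSketch {
        public static void main(String[] args) {
            TreeSet<Integer> levels = new TreeSet<>(List.of(3, 7, 9));
            Integer firstBefore = levels.first();
            boolean changed = levels.removeIf(level -> level > 5); // bulk removal
            boolean minChanged = changed && (levels.isEmpty() || !levels.first().equals(firstBefore));
            System.out.println(minChanged); // false: 3 survived, so the minimum is unchanged
        }
    }
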
diff --git a/leaf-server/minecraft-patches/features/0157-Optimize-ThreadedTicketLevelPropagator.patch b/leaf-server/minecraft-patches/features/0157-Optimize-ThreadedTicketLevelPropagator.patch
new file mode 100644
index 00000000..cc977ac4
--- /dev/null
+++ b/leaf-server/minecraft-patches/features/0157-Optimize-ThreadedTicketLevelPropagator.patch
@@ -0,0 +1,277 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Taiyou06
+Date: Mon, 14 Apr 2025 14:36:57 +0200
+Subject: [PATCH] Optimize ThreadedTicketLevelPropagator
+
+
+diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java
+index b69d256e2f6bab3c1b90c5f8c42caa3d80cd67a4..e8dddc7fca4b0383844be5337a87c4bc1de204b7 100644
+--- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java
++++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/ThreadedTicketLevelPropagator.java
+@@ -780,11 +780,13 @@ public abstract class ThreadedTicketLevelPropagator {
+     // minimum number of bits to represent [0, SECTION_SIZE * SECTION_CACHE_WIDTH)
+     private static final int COORDINATE_BITS = 9;
+     private static final int COORDINATE_SIZE = 1 << COORDINATE_BITS;
++
+     static {
+         if ((SECTION_SIZE * SECTION_CACHE_WIDTH) > (1 << COORDINATE_BITS)) {
+             throw new IllegalStateException("Adjust COORDINATE_BITS");
+         }
+     }
++
+     // index = x + (z * SECTION_CACHE_WIDTH)
+     // (this requires x >= 0 and z >= 0)
+     private final Section[] sections = new Section[SECTION_CACHE_WIDTH * SECTION_CACHE_WIDTH];
+@@ -828,8 +830,8 @@ public abstract class ThreadedTicketLevelPropagator {
+     // must hold ticket lock for (centerSectionX,centerSectionZ) in radius rad
+     // must call setupEncodeOffset
+     private final void setupCaches(final ThreadedTicketLevelPropagator propagator,
+-                                   final int centerSectionX, final int centerSectionZ,
+-                                   final int rad) {
++                                   final int centerSectionX, final int centerSectionZ,
++                                   final int rad) {
+         for (int dz = -rad; dz <= rad; ++dz) {
+             for (int dx = -rad; dx <= rad; ++dx) {
+                 final int sectionX = centerSectionX + dx;
+@@ -847,29 +849,29 @@ public abstract class ThreadedTicketLevelPropagator {
+     }
+ 
+     private final void setSectionInCache(final int sectionX, final int sectionZ, final Section section) {
+-        this.sections[sectionX + SECTION_CACHE_WIDTH*sectionZ + this.sectionIndexOffset] = section;
++        this.sections[sectionX + SECTION_CACHE_WIDTH * sectionZ + this.sectionIndexOffset] = section;
+     }
+ 
+     private final Section getSection(final int sectionX, final int sectionZ) {
+-        return this.sections[sectionX + SECTION_CACHE_WIDTH*sectionZ + this.sectionIndexOffset];
++        return this.sections[sectionX + SECTION_CACHE_WIDTH * sectionZ + this.sectionIndexOffset];
+     }
+ 
+     private final int getLevel(final int posX, final int posZ) {
+-        final Section section = this.sections[(posX >> SECTION_SHIFT) + SECTION_CACHE_WIDTH*(posZ >> SECTION_SHIFT) + this.sectionIndexOffset];
++        final Section section = this.sections[(posX >> SECTION_SHIFT) + SECTION_CACHE_WIDTH * (posZ >> SECTION_SHIFT) + this.sectionIndexOffset];
+         if (section != null) {
+-            return (int)section.levels[(posX & (SECTION_SIZE - 1)) | ((posZ & (SECTION_SIZE - 1)) << SECTION_SHIFT)] & 0xFF;
++            return (int) section.levels[(posX & (SECTION_SIZE - 1)) | ((posZ & (SECTION_SIZE - 1)) << SECTION_SHIFT)] & 0xFF;
+         }
+ 
+         return 0;
+     }
+ 
+     private final void setLevel(final int posX, final int posZ, final int to) {
+-        final Section section = this.sections[(posX >> SECTION_SHIFT) + SECTION_CACHE_WIDTH*(posZ >> SECTION_SHIFT) + this.sectionIndexOffset];
++        final Section section = this.sections[(posX >> SECTION_SHIFT) + SECTION_CACHE_WIDTH * (posZ >> SECTION_SHIFT) + this.sectionIndexOffset];
+         if (section != null) {
+             final int index = (posX & (SECTION_SIZE - 1)) | ((posZ & (SECTION_SIZE - 1)) << SECTION_SHIFT);
+             final short level = section.levels[index];
+-            section.levels[index] = (short)((level & ~0xFF) | (to & 0xFF));
+-            this.updatedPositions.put(CoordinateUtils.getChunkKey(posX, posZ), (byte)to);
++            section.levels[index] = (short) ((level & ~0xFF) | (to & 0xFF));
++            this.updatedPositions.put(CoordinateUtils.getChunkKey(posX, posZ), (byte) to);
+         }
+     }
+ 
+@@ -882,8 +884,8 @@ public abstract class ThreadedTicketLevelPropagator {
+     // next LEVEL_BITS (6) bits: propagated level [0, 63]
+     // propagation directions bitset (16 bits):
+     private static final long ALL_DIRECTIONS_BITSET = (
+-        // z = -1
+-        (1L << ((1 - 1) | ((1 - 1) << 2))) |
++            // z = -1
++            (1L << ((1 - 1) | ((1 - 1) << 2))) |
+         (1L << ((1 + 0) | ((1 - 1) << 2))) |
+         (1L << ((1 + 1) | ((1 - 1) << 2))) |
+ 
+@@ -920,7 +922,7 @@ public abstract class ThreadedTicketLevelPropagator {
+     }
+ 
+     private void ch(long bs, int shift) {
+-        int bitset = (int)(bs >>> shift);
++        int bitset = (int) (bs >>> shift);
+         for (int i = 0, len = Integer.bitCount(bitset); i < len; ++i) {
+             final int set = Integer.numberOfTrailingZeros(bitset);
+             final int tailingBit = (-bitset) & bitset;
+@@ -1000,27 +1002,38 @@ public abstract class ThreadedTicketLevelPropagator {
+         final int decodeOffsetX = -this.encodeOffsetX;
+         final int decodeOffsetZ = -this.encodeOffsetZ;
+         final int encodeOffset = this.coordinateOffset;
+         final int sectionOffset = this.sectionIndexOffset;
++        final Section[] sectionsArray = this.sections;
+ 
+         final Long2ByteLinkedOpenHashMap updatedPositions = this.updatedPositions;
+ 
+         while (queueReadIndex < queueLength) {
+             final long queueValue = queue[queueReadIndex++];
+ 
+-            final int posX = ((int)queueValue & (COORDINATE_SIZE - 1)) + decodeOffsetX;
+-            final int posZ = (((int)queueValue >>> COORDINATE_BITS) & (COORDINATE_SIZE - 1)) + decodeOffsetZ;
+-            final int propagatedLevel = ((int)queueValue >>> (COORDINATE_BITS + COORDINATE_BITS)) & (LEVEL_COUNT - 1);
++            final int posX = ((int) queueValue & (COORDINATE_SIZE - 1)) + decodeOffsetX;
++            final int posZ = (((int) queueValue >>> COORDINATE_BITS) & (COORDINATE_SIZE - 1)) + decodeOffsetZ;
++            final int propagatedLevel = ((int) queueValue >>> (COORDINATE_BITS + COORDINATE_BITS)) & (LEVEL_COUNT - 1);
+             // note: the above code requires coordinate bits * 2 < 32
+             // bitset is 16 bits
+-            int propagateDirectionBitset = (int)(queueValue >>> (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) & ((1 << 16) - 1);
++            int propagateDirectionBitset = (int) (queueValue >>> (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) & ((1 << 16) - 1);
+ 
+             if ((queueValue & FLAG_RECHECK_LEVEL) != 0L) {
+-                if (this.getLevel(posX, posZ) != propagatedLevel) {
++                final int sectionX = posX >> SECTION_SHIFT;
++                final int sectionZ = posZ >> SECTION_SHIFT;
++                final Section section = sectionsArray[sectionX + (sectionZ * SECTION_CACHE_WIDTH) + sectionOffset];
++                final int localIdx = (posX & (SECTION_SIZE - 1)) | ((posZ & (SECTION_SIZE - 1)) << SECTION_SHIFT);
++                if ((section.levels[localIdx] & 0xFF) != propagatedLevel) {
+                     // not at the level we expect, so something changed.
+                     continue;
+                 }
+             } else if ((queueValue & FLAG_WRITE_LEVEL) != 0L) {
+                 // these are used to restore sources after a propagation decrease
+-                this.setLevel(posX, posZ, propagatedLevel);
++                final int sectionX = posX >> SECTION_SHIFT;
++                final int sectionZ = posZ >> SECTION_SHIFT;
++                final Section section = sectionsArray[sectionX + (sectionZ * SECTION_CACHE_WIDTH) + sectionOffset];
++                final int localIdx = (posX & (SECTION_SIZE - 1)) | ((posZ & (SECTION_SIZE - 1)) << SECTION_SHIFT);
++                final short currentLevel = section.levels[localIdx];
++                section.levels[localIdx] = (short) ((currentLevel & ~0xFF) | (propagatedLevel & 0xFF));
++                updatedPositions.put(CoordinateUtils.getChunkKey(posX, posZ), (byte) propagatedLevel);
+             }
+ 
+             // this bitset represents the values that we have not propagated to
+@@ -1036,8 +1049,8 @@ public abstract class ThreadedTicketLevelPropagator {
+             // must guarantee that either we propagate everything in 1 radius or we partially propagate for 1 radius
+             // but the rest not propagated are already handled
+             long currentPropagation = ~(
+-                // z = -1
+-                (1L << ((2 - 1) | ((2 - 1) << 3))) |
++                    // z = -1
++                    (1L << ((2 - 1) | ((2 - 1) << 3))) |
+                 (1L << ((2 + 0) | ((2 - 1) << 3))) |
+                 (1L << ((2 + 1) | ((2 - 1) << 3))) |
+ 
+@@ -1095,7 +1108,7 @@ public abstract class ThreadedTicketLevelPropagator {
+                 currentPropagation ^= (bitsetLine1 | bitsetLine2 | bitsetLine3);
+ 
+                 // now try to propagate
+-                final Section section = this.sections[sectionIndex];
++                final Section section = sectionsArray[sectionIndex];
+ 
+                 // lower 8 bits are current level, next upper 7 bits are source level, next 1 bit is updated source flag
+                 final short currentStoredLevel = section.levels[localIndex];
+@@ -1106,8 +1119,8 @@ public abstract class ThreadedTicketLevelPropagator {
+                 }
+ 
+                 // update level
+-                section.levels[localIndex] = (short)((currentStoredLevel & ~0xFF) | (toPropagate & 0xFF));
+-                updatedPositions.putAndMoveToLast(CoordinateUtils.getChunkKey(offX, offZ), (byte)toPropagate);
++                section.levels[localIndex] = (short) ((currentStoredLevel & ~0xFF) | (toPropagate & 0xFF));
++                updatedPositions.putAndMoveToLast(CoordinateUtils.getChunkKey(offX, offZ), (byte) toPropagate);
+ 
+                 // queue next
+                 if (toPropagate > 1) {
+@@ -1115,7 +1128,7 @@ public abstract class ThreadedTicketLevelPropagator {
+                     // the child bitset is 4x4, so we just shift each line by 4
+                     // add the propagation bitset offset to each line to make it easy to OR it into the propagation queue value
+                     final long childPropagation =
+-                        ((bitsetLine1 >>> (start)) << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) | // z = -1
++                            ((bitsetLine1 >>> (start)) << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) | // z = -1
+                         ((bitsetLine2 >>> (start + 8)) << (4 + COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) | // z = 0
+                         ((bitsetLine3 >>> (start + (8 + 8))) << (4 + 4 + COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)); // z = 1
+ 
+@@ -1125,7 +1138,7 @@ public abstract class ThreadedTicketLevelPropagator {
+                         queue = this.resizeIncreaseQueue();
+                     }
+                     queue[queueLength++] =
+-                        ((long)(offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) |
++                        ((long) (offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) |
+                         ((toPropagate & (LEVEL_COUNT - 1L)) << (COORDINATE_BITS + COORDINATE_BITS)) |
+                         childPropagation; //(ALL_DIRECTIONS_BITSET << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS));
+                     continue;
+@@ -1146,18 +1159,19 @@ public abstract class ThreadedTicketLevelPropagator {
+         final int decodeOffsetZ = -this.encodeOffsetZ;
+         final int encodeOffset = this.coordinateOffset;
+         final int sectionOffset = this.sectionIndexOffset;
++        final Section[] sectionsArray = this.sections;
+ 
+         final Long2ByteLinkedOpenHashMap updatedPositions = this.updatedPositions;
+ 
+         while (queueReadIndex < queueLength) {
+             final long queueValue = queue[queueReadIndex++];
+ 
+-            final int posX = ((int)queueValue & (COORDINATE_SIZE - 1)) + decodeOffsetX;
+-            final int posZ = (((int)queueValue >>> COORDINATE_BITS) & (COORDINATE_SIZE - 1)) + decodeOffsetZ;
+-            final int propagatedLevel = ((int)queueValue >>> (COORDINATE_BITS + COORDINATE_BITS)) & (LEVEL_COUNT - 1);
++            final int posX = ((int) queueValue & (COORDINATE_SIZE - 1)) + decodeOffsetX;
++            final int posZ = (((int) queueValue >>> COORDINATE_BITS) & (COORDINATE_SIZE - 1)) + decodeOffsetZ;
++            final int propagatedLevel = ((int) queueValue >>> (COORDINATE_BITS + COORDINATE_BITS)) & (LEVEL_COUNT - 1);
+             // note: the above code requires coordinate bits * 2 < 32
+             // bitset is 16 bits
+-            int propagateDirectionBitset = (int)(queueValue >>> (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) & ((1 << 16) - 1);
++            int propagateDirectionBitset = (int) (queueValue >>> (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) & ((1 << 16) - 1);
+ 
+             // this bitset represents the values that we have not propagated to
+             // this bitset lets us determine what directions the neighbours we set should propagate to, in most cases
+@@ -1172,8 +1186,8 @@ public abstract class ThreadedTicketLevelPropagator {
+             // must guarantee that either we propagate everything in 1 radius or we partially propagate for 1 radius
+             // but the rest not propagated are already handled
+             long currentPropagation = ~(
+-                // z = -1
+-                (1L << ((2 - 1) | ((2 - 1) << 3))) |
++                    // z = -1
++                    (1L << ((2 - 1) | ((2 - 1) << 3))) |
+                 (1L << ((2 + 0) | ((2 - 1) << 3))) |
+                 (1L << ((2 + 1) | ((2 - 1) << 3))) |
+ 
+@@ -1229,7 +1243,7 @@ public abstract class ThreadedTicketLevelPropagator {
+                 final long bitsetLine3 = currentPropagation & (7L << (start + (8 + 8)));
+ 
+                 // now try to propagate
+-                final Section section = this.sections[sectionIndex];
++                final Section section = sectionsArray[sectionIndex];
sectionsArray[sectionIndex]; + + // lower 8 bits are current level, next upper 7 bits are source level, next 1 bit is updated source flag + final short currentStoredLevel = section.levels[localIndex]; +@@ -1246,7 +1260,7 @@ public abstract class ThreadedTicketLevelPropagator { + increaseQueue = this.resizeIncreaseQueue(); + } + increaseQueue[increaseQueueLength++] = +- ((long)(offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) | ++ ((long) (offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) | + ((currentLevel & (LEVEL_COUNT - 1L)) << (COORDINATE_BITS + COORDINATE_BITS)) | + (FLAG_RECHECK_LEVEL | (ALL_DIRECTIONS_BITSET << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS))); + continue; +@@ -1257,8 +1271,8 @@ public abstract class ThreadedTicketLevelPropagator { + //currentPropagation ^= (bitsetLine1 | bitsetLine2 | bitsetLine3); + + // update level +- section.levels[localIndex] = (short)((currentStoredLevel & ~0xFF)); +- updatedPositions.putAndMoveToLast(CoordinateUtils.getChunkKey(offX, offZ), (byte)0); ++ section.levels[localIndex] = (short) ((currentStoredLevel & ~0xFF)); ++ updatedPositions.putAndMoveToLast(CoordinateUtils.getChunkKey(offX, offZ), (byte) 0); + + if (sourceLevel != 0) { + // re-propagate source +@@ -1267,7 +1281,7 @@ public abstract class ThreadedTicketLevelPropagator { + increaseQueue = this.resizeIncreaseQueue(); + } + increaseQueue[increaseQueueLength++] = +- ((long)(offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) | ++ ((long) (offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) | + ((sourceLevel & (LEVEL_COUNT - 1L)) << (COORDINATE_BITS + COORDINATE_BITS)) | + (FLAG_WRITE_LEVEL | (ALL_DIRECTIONS_BITSET << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS))); + } +@@ -1278,7 +1292,7 @@ public abstract class ThreadedTicketLevelPropagator { + // the child bitset is 4x4, so we just shift each line by 4 + // add the propagation bitset offset to each line to make it easy to OR it into the propagation queue value + final long childPropagation = +- ((bitsetLine1 >>> (start)) << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) | // z = -1 ++ ((bitsetLine1 >>> (start)) << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) | // z = -1 + ((bitsetLine2 >>> (start + 8)) << (4 + COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)) | // z = 0 + ((bitsetLine3 >>> (start + (8 + 8))) << (4 + 4 + COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)); // z = 1 + +@@ -1288,7 +1302,7 @@ public abstract class ThreadedTicketLevelPropagator { + queue = this.resizeDecreaseQueue(); + } + queue[queueLength++] = +- ((long)(offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) | ++ ((long) (offX + (offZ << COORDINATE_BITS) + encodeOffset) & ((1L << (COORDINATE_BITS + COORDINATE_BITS)) - 1)) | + ((toPropagate & (LEVEL_COUNT - 1L)) << (COORDINATE_BITS + COORDINATE_BITS)) | + (ALL_DIRECTIONS_BITSET << (COORDINATE_BITS + COORDINATE_BITS + LEVEL_BITS)); //childPropagation; + continue; diff --git a/leaf-server/minecraft-patches/features/0158-Optimise-MobEffectUtil-getDigSpeedAmplification.patch b/leaf-server/minecraft-patches/features/0158-Optimise-MobEffectUtil-getDigSpeedAmplification.patch new file mode 100644 index 00000000..3550d539 --- /dev/null +++ b/leaf-server/minecraft-patches/features/0158-Optimise-MobEffectUtil-getDigSpeedAmplification.patch @@ -0,0 
+1,29 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Taiyou06 +Date: Mon, 14 Apr 2025 18:07:21 +0200 +Subject: [PATCH] Optimise MobEffectUtil#getDigSpeedAmplification + + +diff --git a/net/minecraft/world/effect/MobEffectUtil.java b/net/minecraft/world/effect/MobEffectUtil.java +index cbf1b6af928aa439c3264b302e5f1a1ddd4c14f0..c59a503ef8bc2dabcf9f7c85c8d93fb1fcadf71f 100644 +--- a/net/minecraft/world/effect/MobEffectUtil.java ++++ b/net/minecraft/world/effect/MobEffectUtil.java +@@ -29,12 +29,14 @@ public final class MobEffectUtil { + public static int getDigSpeedAmplification(LivingEntity entity) { + int i = 0; + int i1 = 0; +- if (entity.hasEffect(MobEffects.DIG_SPEED)) { +- i = entity.getEffect(MobEffects.DIG_SPEED).getAmplifier(); ++ MobEffectInstance digEffect = entity.getEffect(MobEffects.DIG_SPEED); ++ if (digEffect != null) { ++ i = digEffect.getAmplifier(); + } + +- if (entity.hasEffect(MobEffects.CONDUIT_POWER)) { +- i1 = entity.getEffect(MobEffects.CONDUIT_POWER).getAmplifier(); ++ MobEffectInstance conduitEffect = entity.getEffect(MobEffects.CONDUIT_POWER); ++ if (conduitEffect != null) { ++ i1 = conduitEffect.getAmplifier(); + } + + return Math.max(i, i1); diff --git a/leaf-server/minecraft-patches/features/0135-Optimize-chunkUnload.patch b/leaf-server/minecraft-patches/features/0159-Optimise-chunkUnloads.patch similarity index 51% rename from leaf-server/minecraft-patches/features/0135-Optimize-chunkUnload.patch rename to leaf-server/minecraft-patches/features/0159-Optimise-chunkUnloads.patch index ce7a01ab..d9564a21 100644 --- a/leaf-server/minecraft-patches/features/0135-Optimize-chunkUnload.patch +++ b/leaf-server/minecraft-patches/features/0159-Optimise-chunkUnloads.patch @@ -1,9 +1,47 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Taiyou06 -Date: Fri, 28 Feb 2025 01:35:49 +0100 -Subject: [PATCH] Optimize chunkUnload +Date: Mon, 14 Apr 2025 20:07:52 +0200 +Subject: [PATCH] Optimise chunkUnloads +diff --git a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/NewChunkHolder.java b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/NewChunkHolder.java +index e4a5fa25ed368fc4662c30934da2963ef446d782..62ad5fe1196cd982b0d48b1e4903d036262ac54b 100644 +--- a/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/NewChunkHolder.java ++++ b/ca/spottedleaf/moonrise/patches/chunk_system/scheduling/NewChunkHolder.java +@@ -1753,23 +1753,20 @@ public final class NewChunkHolder { + chunk.tryMarkSaved(); + + final CallbackCompletable completable = new CallbackCompletable<>(); +- +- final Runnable run = () -> { +- final CompoundTag data = chunkData.write(); +- +- completable.complete(data); +- +- if (unloading) { +- NewChunkHolder.this.completeAsyncUnloadDataSave(MoonriseRegionFileIO.RegionFileType.CHUNK_DATA, data); +- } +- }; +- + final PrioritisedExecutor.PrioritisedTask task; ++ + if (unloading) { +- this.chunkDataUnload.toRun().setRunnable(run); ++ this.chunkDataUnload.toRun().setRunnable(() -> { ++ final CompoundTag data = chunkData.write(); ++ completable.complete(data); ++ NewChunkHolder.this.completeAsyncUnloadDataSave(MoonriseRegionFileIO.RegionFileType.CHUNK_DATA, data); ++ }); + task = this.chunkDataUnload.task(); + } else { +- task = this.scheduler.saveExecutor.createTask(run); ++ task = this.scheduler.saveExecutor.createTask(() -> { ++ final CompoundTag data = chunkData.write(); ++ completable.complete(data); ++ }); + } + + task.queue(); diff --git 
a/ca/spottedleaf/moonrise/patches/starlight/light/SWMRNibbleArray.java b/ca/spottedleaf/moonrise/patches/starlight/light/SWMRNibbleArray.java index 4ca68a903e67606fc4ef0bfa9862a73797121c8b..bed3a64388bb43e47c2ba4e67f7dde5b990d9578 100644 --- a/ca/spottedleaf/moonrise/patches/starlight/light/SWMRNibbleArray.java @@ -211,3 +249,128 @@ index b8ac6a9ba7b56ccd034757f7d135d272b8e69e90..dc158e981199b507531af810ff9ced3c return new LevelChunkSection(this); } } +diff --git a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java +index e9ece9b618b0a9eb82b9f07a09ee6cb60cf7ec16..18d2ec110fc6670edb079eccf448389dc365eb88 100644 +--- a/net/minecraft/world/level/chunk/storage/SerializableChunkData.java ++++ b/net/minecraft/world/level/chunk/storage/SerializableChunkData.java +@@ -526,14 +526,14 @@ public record SerializableChunkData( + throw new IllegalArgumentException("Chunk can't be serialized: " + chunk); + } else { + ChunkPos pos = chunk.getPos(); +- List list = new ArrayList<>(); final List sectionsList = list; // Paper - starlight - OBFHELPER +- LevelChunkSection[] sections = chunk.getSections(); +- LevelLightEngine lightEngine = level.getChunkSource().getLightEngine(); + + // Paper start - starlight + final int minLightSection = ca.spottedleaf.moonrise.common.util.WorldUtil.getMinLightSection(level); + final int maxLightSection = ca.spottedleaf.moonrise.common.util.WorldUtil.getMaxLightSection(level); + final int minBlockSection = ca.spottedleaf.moonrise.common.util.WorldUtil.getMinSection(level); ++ // Pre-allocate with correct capacity to avoid resizing ++ final int expectedSectionCount = maxLightSection - minLightSection + 1; ++ List list = new ArrayList<>(expectedSectionCount); + + final LevelChunkSection[] chunkSections = chunk.getSections(); + final ca.spottedleaf.moonrise.patches.starlight.light.SWMRNibbleArray[] blockNibbles = ((ca.spottedleaf.moonrise.patches.starlight.chunk.StarlightChunk)chunk).starlight$getBlockNibbles(); +@@ -551,10 +551,18 @@ public record SerializableChunkData( + continue; + } + ++ DataLayer blockDataLayer = null; ++ if (blockNibble != null && blockNibble.data != null) { ++ blockDataLayer = new DataLayer(blockNibble.data); ++ } ++ ++ DataLayer skyDataLayer = null; ++ if (skyNibble != null && skyNibble.data != null) { ++ skyDataLayer = new DataLayer(skyNibble.data); ++ } ++ + final SerializableChunkData.SectionData sectionData = new SerializableChunkData.SectionData( +- lightSection, chunkSection, +- blockNibble == null ? null : (blockNibble.data == null ? null : new DataLayer(blockNibble.data)), +- skyNibble == null ? null : (skyNibble.data == null ? null : new DataLayer(skyNibble.data)) ++ lightSection, chunkSection, blockDataLayer, skyDataLayer + ); + + if (blockNibble != null) { +@@ -565,28 +573,42 @@ public record SerializableChunkData( + ((ca.spottedleaf.moonrise.patches.starlight.storage.StarlightSectionData)(Object)sectionData).starlight$setSkyLightState(skyNibble.state); + } + +- sectionsList.add(sectionData); ++ list.add(sectionData); + } + // Paper end - starlight + +- List list1 = new ArrayList<>(chunk.getBlockEntitiesPos().size()); ++ // Pre-allocate block entities list with exact size needed ++ final int blockEntityCount = chunk.getBlockEntitiesPos().size(); ++ List list1 = blockEntityCount > 0 ? 
new ArrayList<>(blockEntityCount) : java.util.Collections.emptyList(); + +- for (BlockPos blockPos : chunk.getBlockEntitiesPos()) { +- CompoundTag blockEntityNbtForSaving = chunk.getBlockEntityNbtForSaving(blockPos, level.registryAccess()); +- if (blockEntityNbtForSaving != null) { +- list1.add(blockEntityNbtForSaving); ++ if (blockEntityCount > 0) { ++ for (BlockPos blockPos : chunk.getBlockEntitiesPos()) { ++ CompoundTag blockEntityNbtForSaving = chunk.getBlockEntityNbtForSaving(blockPos, level.registryAccess()); ++ if (blockEntityNbtForSaving != null) { ++ list1.add(blockEntityNbtForSaving); ++ } + } + } + +- List list2 = new ArrayList<>(); ++ // For entities, use an initial estimated capacity if it's a ProtoChunk ++ List list2; + long[] longs = null; ++ + if (chunk.getPersistedStatus().getChunkType() == ChunkType.PROTOCHUNK) { + ProtoChunk protoChunk = (ProtoChunk)chunk; +- list2.addAll(protoChunk.getEntities()); ++ int entitySize = protoChunk.getEntities().size(); ++ if (entitySize > 0) { ++ list2 = new ArrayList<>(Math.max(16, entitySize)); ++ list2.addAll(protoChunk.getEntities()); ++ } else { ++ list2 = java.util.Collections.emptyList(); ++ } + CarvingMask carvingMask = protoChunk.getCarvingMask(); + if (carvingMask != null) { + longs = carvingMask.toArray(); + } ++ } else { ++ list2 = java.util.Collections.emptyList(); + } + + Map map = new EnumMap<>(Heightmap.Types.class); +@@ -594,14 +616,25 @@ public record SerializableChunkData( + for (Entry entry : chunk.getHeightmaps()) { + if (chunk.getPersistedStatus().heightmapsAfter().contains(entry.getKey())) { + long[] rawData = entry.getValue().getRawData(); +- map.put(entry.getKey(), (long[])rawData.clone()); ++ map.put(entry.getKey(), Arrays.copyOf(rawData, rawData.length)); + } + } + + ChunkAccess.PackedTicks ticksForSerialization = chunk.getTicksForSerialization(level.getGameTime()); +- ShortList[] lists = Arrays.stream(chunk.getPostProcessing()) +- .map(list3 -> list3 != null ? 
new ShortArrayList(list3) : null) +- .toArray(ShortList[]::new); ++ // Leaf start - Some Optimizations on SerializableChunkData ++ ShortList[] postProcessing = chunk.getPostProcessing(); ++ ShortList[] lists = new ShortList[postProcessing.length]; ++ for (int i = 0; i < postProcessing.length; i++) { ++ ShortList source = postProcessing[i]; ++ // Only create a new list if there's actual data to copy ++ if (source != null) { ++ int size = source.size(); ++ if (size > 0) { ++ lists[i] = new ShortArrayList(size); ++ lists[i].addAll(source); ++ } ++ } ++ } + CompoundTag compoundTag = packStructureData( + StructurePieceSerializationContext.fromLevel(level), pos, chunk.getAllStarts(), chunk.getAllReferences() + ); diff --git a/leaf-server/src/main/java/org/dreeam/leaf/util/map/spottedleaf/LeafConcurrentLong2ReferenceChainedHashTable.java b/leaf-server/src/main/java/org/dreeam/leaf/util/map/spottedleaf/LeafConcurrentLong2ReferenceChainedHashTable.java new file mode 100644 index 00000000..2e6719b8 --- /dev/null +++ b/leaf-server/src/main/java/org/dreeam/leaf/util/map/spottedleaf/LeafConcurrentLong2ReferenceChainedHashTable.java @@ -0,0 +1,2102 @@ +package org.dreeam.leaf.util.map.spottedleaf; + +import ca.spottedleaf.concurrentutil.function.BiLong1Function; +import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; +import ca.spottedleaf.concurrentutil.util.HashUtil; +import ca.spottedleaf.concurrentutil.util.IntegerUtil; +import ca.spottedleaf.concurrentutil.util.ThrowUtil; +import ca.spottedleaf.concurrentutil.util.Validate; + +import java.lang.invoke.VarHandle; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.LongAdder; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.LongConsumer; +import java.util.function.LongFunction; +import java.util.function.Predicate; + +/** + * Optimized concurrent hashtable implementation supporting mapping arbitrary {@code long} keys onto non-null {@code Object} + * values with support for multiple writer and multiple reader threads. Utilizes lock-free read paths, + * optimistic lock-free write attempts, and fine-grained locking during modifications and resizing. + * + *
+ * <h2>Happens-before relationship</h2>
+ *
+ * <p>
+ * As with {@link ConcurrentMap}, actions in a thread prior to placing an object into this map
+ * happen-before actions subsequent to the access or removal of that object in another thread.
+ * </p>
+ *
+ * <h2>Atomicity of functional methods</h2>
+ *
+ * <p>
+ * Functional methods (like {@code compute}, {@code merge}, etc.) are performed atomically per key.
+ * The function provided is guaranteed to be invoked at most once per call under a lock specific to the
+ * entry's bin. Consequently, invoking other map modification methods on this map from within the function
+ * can lead to undefined behavior or deadlock.
+ * </p>
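+ *
+ * <p>
+ * A minimal usage sketch of the atomicity guarantee (illustrative only; the
+ * {@code Long}-valued map and {@code chunkKey} below are assumed, not part of this class):
+ * <pre>{@code
+ * LeafConcurrentLong2ReferenceChainedHashTable<Long> counts =
+ *     LeafConcurrentLong2ReferenceChainedHashTable.createWithExpected(64);
+ * // The remapping function runs at most once per call, under this key's bin lock:
+ * counts.compute(chunkKey, (key, old) -> old == null ? 1L : old + 1L);
+ * // Do NOT call back into the map from inside the function; that may deadlock on the bin lock.
+ * }</pre>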
+ *
+ * @param <V> The type of mapped values (must be non-null).
+ * @see java.util.concurrent.ConcurrentHashMap
+ */
+
+public class LeafConcurrentLong2ReferenceChainedHashTable<V> implements Iterable<LeafConcurrentLong2ReferenceChainedHashTable.TableEntry<V>> {
+
+    // --- Constants ---
+
+    protected static final int DEFAULT_CAPACITY = 16;
+    protected static final float DEFAULT_LOAD_FACTOR = 0.75f;
+    /** The maximum capacity, used if a higher value is implicitly specified by either
+     * of the constructors with arguments. MUST be a power of two <= 1<<30.
+     */
+    protected static final int MAXIMUM_CAPACITY = 1 << 30; // 2^30
+
+    protected static final int THRESHOLD_NO_RESIZE = -1; // Sentinel value: table cannot be resized
+    protected static final int THRESHOLD_RESIZING = -2; // Sentinel value: table is currently resizing
+
+    // --- Instance Fields ---
+
+    /** Tracks the number of mappings, using LongAdder for better high-contention performance. */
+    protected final LongAdder size = new LongAdder();
+
+    /** The load factor for the hash table. */
+    protected final float loadFactor;
+
+    /** The hash table array. Elements are accessed using VarHandles. */
+    protected volatile TableEntry<V>[] table;
+
+    /**
+     * The next size value at which to resize (unless {@code <= 0}).
+     * Accessed via VarHandle {@link #THRESHOLD_HANDLE}.
+     */
+    protected volatile int threshold;
+
+    // --- VarHandles ---
+
+    protected static final VarHandle THRESHOLD_HANDLE;
+    static {
+        try {
+            THRESHOLD_HANDLE = ConcurrentUtil.getVarHandle(LeafConcurrentLong2ReferenceChainedHashTable.class, "threshold", int.class);
+        } catch (Throwable t) {
+            throw new Error("Failed to initialize VarHandles", t);
+        }
+        // Static initialization for TableEntry VarHandles is inside the TableEntry class
+    }
+
+    // --- Views (lazily initialized) ---
+
+    protected transient Values values;
+    protected transient EntrySet entrySet;
+
+    // --- Constructors ---
+
+    /**
+     * Creates a new, empty map with the default initial capacity (16) and load factor (0.75).
+     */
+    public LeafConcurrentLong2ReferenceChainedHashTable() {
+        this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR);
+    }
+
+    /**
+     * Creates a new, empty map with the specified initial capacity and load factor.
+     *
+     * @param initialCapacity The initial capacity. The implementation performs internal
+     *                        sizing to accommodate this many elements.
+     * @param loadFactor The load factor threshold, used to control resizing.
+     * @throws IllegalArgumentException if the initial capacity is negative or the load
+     *                                  factor is non-positive or NaN.
+     */
+    @SuppressWarnings("unchecked")
+    protected LeafConcurrentLong2ReferenceChainedHashTable(final int initialCapacity, final float loadFactor) {
+        if (loadFactor <= 0.0f || !Float.isFinite(loadFactor)) {
+            throw new IllegalArgumentException("Invalid load factor: " + loadFactor);
+        }
+        if (initialCapacity < 0) {
+            throw new IllegalArgumentException("Invalid initial capacity: " + initialCapacity);
+        }
+
+        final int tableSize = getCapacityFor(initialCapacity);
+        this.loadFactor = loadFactor;
+        this.setThresholdPlain(getTargetThreshold(tableSize, loadFactor)); // Use plain set, happens-before established by volatile table write
+        this.table = (TableEntry<V>[]) new TableEntry[tableSize]; // Volatile write publishes the initial state
+    }
+
+    /**
+     * Creates a new, empty map with the specified initial capacity and the default load factor (0.75).
+     *
+     * @param capacity The initial capacity.
+     * @throws IllegalArgumentException if the initial capacity is negative.
+ */ + public static LeafConcurrentLong2ReferenceChainedHashTable createWithCapacity(final int capacity) { + return createWithCapacity(capacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Creates a new, empty map with the specified initial capacity and load factor. + * + * @param capacity The initial capacity. + * @param loadFactor The load factor threshold. + * @throws IllegalArgumentException if the initial capacity is negative or the load factor is non-positive/NaN. + */ + public static LeafConcurrentLong2ReferenceChainedHashTable createWithCapacity(final int capacity, final float loadFactor) { + return new LeafConcurrentLong2ReferenceChainedHashTable<>(capacity, loadFactor); + } + + /** + * Creates a new, empty map with an initial capacity sufficient to hold the specified number of elements + * without resizing, using the default load factor (0.75). + * + * @param expected The expected number of elements. + * @throws IllegalArgumentException if the expected size is negative. + */ + public static LeafConcurrentLong2ReferenceChainedHashTable createWithExpected(final int expected) { + return createWithExpected(expected, DEFAULT_LOAD_FACTOR); + } + + /** + * Creates a new, empty map with an initial capacity sufficient to hold the specified number of elements + * without resizing, using the specified load factor. + * + * @param expected The expected number of elements. + * @param loadFactor The load factor threshold. + * @throws IllegalArgumentException if the expected size is negative or the load factor is non-positive/NaN. + */ + public static LeafConcurrentLong2ReferenceChainedHashTable createWithExpected(final int expected, final float loadFactor) { + if (expected < 0) { + throw new IllegalArgumentException("Invalid expected size: " + expected); + } + // Calculate initial capacity based on expected size and load factor + final double capacityEstimate = ((double) expected / (double) loadFactor) + 1.0; + final int capacity = (capacityEstimate >= (double) MAXIMUM_CAPACITY) + ? MAXIMUM_CAPACITY + : (int) Math.min(MAXIMUM_CAPACITY, Math.max(DEFAULT_CAPACITY, Math.ceil(capacityEstimate))); + return createWithCapacity(capacity, loadFactor); + } + + // --- Internal Helper Methods --- + + /** Calculates the target resize threshold. */ + protected static int getTargetThreshold(final int capacity, final float loadFactor) { + if (capacity >= MAXIMUM_CAPACITY) { + return THRESHOLD_NO_RESIZE; // Max capacity reached, no more resizing + } + // Calculate threshold, preventing overflow and ensuring it's at least 1 + final double calculatedThreshold = (double) capacity * (double) loadFactor; + if (calculatedThreshold >= (double) MAXIMUM_CAPACITY) { + return MAXIMUM_CAPACITY; // Cap threshold at maximum capacity if calculation exceeds it + } + // Use ceil to ensure threshold is met strictly *after* the size reaches it + return (int) Math.max(1, Math.ceil(calculatedThreshold)); + } + + + /** Calculates the power-of-two capacity for a given initial capacity request. */ + protected static int getCapacityFor(final int requestedCapacity) { + if (requestedCapacity <= 0) { + // Default capacity if non-positive requested, could also throw exception + return DEFAULT_CAPACITY; + } + if (requestedCapacity >= MAXIMUM_CAPACITY) { + return MAXIMUM_CAPACITY; + } + // Round up to the next power of two + return IntegerUtil.roundCeilLog2(Math.max(DEFAULT_CAPACITY, requestedCapacity)); + } + + /** Computes the hash code for the key. Uses mixing to spread keys more evenly. 
*/ + protected static int getHash(final long key) { + return (int) HashUtil.mix(key); // Assumes HashUtil.mix provides good distribution + } + + /** Returns the load factor associated with this map. */ + public final float getLoadFactor() { + return this.loadFactor; + } + + // --- VarHandle Accessors for 'threshold' --- + + protected final int getThresholdAcquire() { + return (int) THRESHOLD_HANDLE.getAcquire(this); + } + + protected final int getThresholdVolatile() { + return (int) THRESHOLD_HANDLE.getVolatile(this); + } + + protected final void setThresholdPlain(final int threshold) { + THRESHOLD_HANDLE.set(this, threshold); + } + + protected final void setThresholdRelease(final int threshold) { + THRESHOLD_HANDLE.setRelease(this, threshold); + } + + protected final void setThresholdVolatile(final int threshold) { + THRESHOLD_HANDLE.setVolatile(this, threshold); + } + + protected final int compareExchangeThresholdVolatile(final int expect, final int update) { + return (int) THRESHOLD_HANDLE.compareAndExchange(this, expect, update); + } + + // --- VarHandle Accessors for 'table' array elements --- + + @SuppressWarnings("unchecked") + protected static TableEntry getAtIndexVolatile(final TableEntry[] table, final int index) { + return (TableEntry) TableEntry.TABLE_ENTRY_ARRAY_HANDLE.getVolatile(table, index); + } + + protected static void setAtIndexRelease(final TableEntry[] table, final int index, final TableEntry value) { + TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setRelease(table, index, value); + } + + protected static void setAtIndexVolatile(final TableEntry[] table, final int index, final TableEntry value) { + TableEntry.TABLE_ENTRY_ARRAY_HANDLE.setVolatile(table, index, value); + } + + @SuppressWarnings("unchecked") + protected static TableEntry compareAndExchangeAtIndexVolatile(final TableEntry[] table, final int index, + final TableEntry expect, final TableEntry update) { + return (TableEntry) TableEntry.TABLE_ENTRY_ARRAY_HANDLE.compareAndExchange(table, index, expect, update); + } + + // --- Core Map Operations --- + + /** + * Retrieves the node associated with the key. This is the core lookup logic. + * It handles concurrent resizes without locking for reads. + * Returns null if the key is not found. + * The returned node's value might be null if it's a placeholder during a compute operation. + */ + @SuppressWarnings("unchecked") + protected final TableEntry getNode(final long key) { + final int hash = getHash(key); + TableEntry[] currentTable = this.table; // Volatile read + + outer_loop: + for (;;) { // Loop handles table resizes detected during traversal + final int tableLength = currentTable.length; + if (tableLength == 0) { + // Table might not be initialized yet (race in constructor?), re-read. + currentTable = this.table; + if (currentTable.length == 0) { + // Still not initialized? Should not happen normally. Return null safely. 
+ return null; + } + continue; // Retry with the initialized table + } + + final int index = hash & (tableLength - 1); // Calculate index using mask + TableEntry head = getAtIndexVolatile(currentTable, index); // Volatile read of bin head + + if (head == null) { + return null; // Bin is empty + } + + // Check if the bin head is a resize marker + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue outer_loop; // Retry operation with the new table + } + + // Check if the head node itself contains the key + // Reduces chain traversal for head hits + if (head.key == key) { + return head; + } + + // Traverse the linked list (chain) in the bin + // Volatile read is necessary here to observe concurrent modifications (removals/resizes) + TableEntry node = head.getNextVolatile(); + while (node != null) { + if (node.key == key) { + return node; // Key found + } + node = node.getNextVolatile(); // Move to the next node using volatile read + } + + // Key not found in the chain. + // Crucial check: Re-read table reference to see if a resize occurred *during* traversal. + TableEntry[] latestTable = this.table; // Volatile read + if (currentTable != latestTable) { + // Table reference changed, a resize happened. Retry the whole lookup. + currentTable = latestTable; + continue outer_loop; + } + + // Key not found, and table reference is stable since traversal started. + return null; + } + } + + /** + * Helps with resizing or gets the reference to the next table if the current + * bin contains a resize marker. + */ + @SuppressWarnings("unchecked") + private TableEntry[] helpResizeOrGetNextTable(TableEntry[] currentTable, TableEntry resizeMarker) { + // The new table reference is stored in the 'value' field of the resize marker + V markerValue = resizeMarker.getValuePlain(); // Plain read is safe, marker itself is effectively final + if (markerValue instanceof TableEntry[]) { + // Consider adding active resizing help here in a contended scenario + return (TableEntry[]) markerValue; + } + // Fallback: Should not happen if markers are correct. Force retry by re-reading table. + return this.table; + } + + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + * @param key the key whose associated value is to be returned + * @return the value mapped to the key, or {@code null} if none + */ + public V get(final long key) { + final TableEntry node = this.getNode(key); + // Use volatile read on value to ensure happens-before visibility + return (node == null) ? null : node.getValueVolatile(); + } + + /** + * Returns the value to which the specified key is mapped, or + * {@code defaultValue} if this map contains no mapping for the key. + * + * @param key the key whose associated value is to be returned + * @param defaultValue the default mapping of the key + * @return the value mapped to the key, or {@code defaultValue} if none + */ + public V getOrDefault(final long key, final V defaultValue) { + final TableEntry node = this.getNode(key); + if (node == null) { + return defaultValue; + } + // Use volatile read for visibility. Check for null in case it's a compute placeholder. + final V value = node.getValueVolatile(); + return (value == null) ? defaultValue : value; + } + + /** + * Returns {@code true} if this map contains a mapping for the specified key. 
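+ *
+ * <p>
+ * Since mapped values are never null, this is equivalent to {@code get(key) != null}
+ * (sketch; {@code holders} and {@code coordKey} are assumed names):
+ * <pre>{@code
+ * if (holders.containsKey(coordKey)) {
+ *     // a non-null mapping is currently present
+ * }
+ * }</pre>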
+ * + * @param key The key whose presence in this map is to be tested + * @return {@code true} if this map contains a mapping for the specified key + */ + public boolean containsKey(final long key) { + final TableEntry node = this.getNode(key); + // Must check value is non-null, as getNode might return a placeholder + return node != null && node.getValueVolatile() != null; // Volatile read for visibility + } + + /** + * Returns {@code true} if this map maps one or more keys to the specified value. + * Note: This operation requires traversing the entire map. + * + * @param value value whose presence in this map is to be tested + * @return {@code true} if this map maps one or more keys to the specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(final V value) { + Validate.notNull(value, "Value cannot be null"); + // Use an iterator that handles concurrent modifications and resizes safely. + NodeIterator iterator = new NodeIterator<>(this.table, this); + TableEntry node; + while ((node = iterator.findNext()) != null) { // findNext safely iterates through nodes + V nodeValue = node.getValueVolatile(); // Volatile read for visibility + if (nodeValue != null && value.equals(nodeValue)) { + return true; + } + } + return false; + } + + /** + * Returns the number of key-value mappings in this map. If the + * number of elements exceeds {@code Integer.MAX_VALUE}, returns + * {@code Integer.MAX_VALUE}. + * + * @return the number of key-value mappings in this map + */ + public int size() { + final long ret = this.size.sum(); + // Cap the size at Integer.MAX_VALUE as per ConcurrentMap contract + return (ret >= (long) Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) ret; + } + + /** + * Returns {@code true} if this map contains no key-value mappings. + * + * @return {@code true} if this map contains no key-value mappings + */ + public boolean isEmpty() { + // Check size first for a quick exit, but verify with iteration if size is 0 + // as LongAdder.sum() might be transiently inaccurate. + if (this.size.sum() > 0L) { + return false; + } + // If size reports 0, double-check by looking for any actual node + NodeIterator it = new NodeIterator<>(this.table, this); + return it.findNext() == null; + } + + /** + * Increments the size count and initiates resizing if the threshold is exceeded. + */ + protected final void addSize(final long count) { + this.size.add(count); + int currentThreshold; + do { + currentThreshold = this.getThresholdAcquire(); // Acquire fence for reading threshold + if (currentThreshold <= 0) return; // THRESHOLD_NO_RESIZE or THRESHOLD_RESIZING + + final long currentSum = this.size.sum(); // Get current estimated size + if (currentSum < (long) currentThreshold) { + // Double check threshold hasn't changed due to another thread finishing resize + if (currentThreshold == this.getThresholdVolatile()) return; + continue; // Threshold changed, retry the loop + } + + // Size exceeds threshold, attempt to initiate resize + if (this.compareExchangeThresholdVolatile(currentThreshold, THRESHOLD_RESIZING) == currentThreshold) { + this.resize(currentSum); // Pass estimated size + return; // Resize initiated or completed + } + // CAS failed, another thread initiated resize. Loop might retry. + } while (true); + } + + /** + * Decrements the size count. + */ + protected final void subSize(final long count) { + this.size.add(-count); + // Note: No resize check needed on removal + } + + /** + * Resizes the table to accommodate more entries. 
Called by the thread + * that successfully sets the threshold to THRESHOLD_RESIZING. + */ + @SuppressWarnings("unchecked") + private void resize(final long estimatedSize) { // estimatedSize might not be perfectly accurate + final TableEntry[] oldTable = this.table; // Volatile read + final int oldCapacity = oldTable.length; + + if (oldCapacity >= MAXIMUM_CAPACITY) { + this.setThresholdVolatile(THRESHOLD_NO_RESIZE); + return; + } + + int newCapacity = oldCapacity << 1; // Double the capacity + if (newCapacity <= oldCapacity || newCapacity > MAXIMUM_CAPACITY) { // Handle overflow or max + newCapacity = MAXIMUM_CAPACITY; + } + if (newCapacity == oldCapacity) { // Already maxed out + this.setThresholdVolatile(THRESHOLD_NO_RESIZE); + return; + } + + final int newThreshold = getTargetThreshold(newCapacity, this.loadFactor); + final TableEntry[] newTable = (TableEntry[]) new TableEntry[newCapacity]; + final TableEntry resizeMarker = new TableEntry<>(0L, (V) newTable, true); // Key irrelevant for marker + + for (int i = 0; i < oldCapacity; ++i) { + TableEntry head = getAtIndexVolatile(oldTable, i); + + if (head == null) { + // Try to CAS marker into empty bin + if (compareAndExchangeAtIndexVolatile(oldTable, i, null, resizeMarker) == null) { + continue; // Marked empty bin + } + // CAS failed, re-read + head = getAtIndexVolatile(oldTable, i); + if (head == null || head.isResizeMarker()) continue; // Still null or handled + } + + if (head.isResizeMarker()) { + continue; // Already processed + } + + // Bin has entries, lock head to transfer chain + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(oldTable, i); + // Re-check after lock + if (currentHead != head) { + i--; // Reprocess index 'i' if head changed while waiting + continue; + } + if (head.isResizeMarker()) { + continue; // Marked while waiting + } + + // Split chain: index 'i' vs 'i + oldCapacity' + TableEntry lowH = null, lowT = null; + TableEntry highH = null, highT = null; + + TableEntry current = head; + while (current != null) { + TableEntry next = current.getNextPlain(); // Plain read inside lock + int hash = getHash(current.key); + + if ((hash & oldCapacity) == 0) { // Low bin (index i) + if (lowT == null) lowH = current; else lowT.setNextPlain(current); + lowT = current; + } else { // High bin (index i + oldCapacity) + if (highT == null) highH = current; else highT.setNextPlain(current); + highT = current; + } + current = next; + } + + if (lowT != null) lowT.setNextPlain(null); + if (highT != null) highT.setNextPlain(null); + + // Place chains into new table (volatile writes) + setAtIndexVolatile(newTable, i, lowH); + setAtIndexVolatile(newTable, i + oldCapacity, highH); + + // Mark old bin as processed (release write) + setAtIndexRelease(oldTable, i, resizeMarker); + } // End synchronized + } // End loop over old table bins + + // Finalize: publish new table and threshold + this.table = newTable; + this.setThresholdVolatile(newThreshold); + } + + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key}. 
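+ *
+ * <p>
+ * Sketch of the displaced-value idiom (names assumed, not part of this class):
+ * <pre>{@code
+ * ChunkHolder previous = holders.put(coordKey, holder); // null on first insert
+ * if (previous != null) {
+ *     previous.release(); // an old mapping was displaced
+ * }
+ * }</pre>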
+ * @throws NullPointerException if the specified value is null + */ + public V put(final long key, final V value) { + Validate.notNull(value, "Value may not be null"); + final int hash = getHash(key); + int sizeDelta = 0; + V oldValue = null; + TableEntry[] currentTable = this.table; + + table_loop: + for (;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) { currentTable = this.table; if (currentTable.length == 0) continue; } // Init check + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + // Case 1: Bin is empty + if (head == null) { + TableEntry newNode = new TableEntry<>(key, value); + if (compareAndExchangeAtIndexVolatile(currentTable, index, null, newNode) == null) { + this.addSize(1L); + return null; // Inserted successfully + } + continue table_loop; // CAS failed, retry + } + + // Case 2: Resize marker + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Case 3: Optimistic lock-free update attempt + TableEntry node = head; + while (node != null) { + if (node.key == key) { + V currentVal = node.getValueVolatile(); // Volatile read + if (currentVal == null) break; // Placeholder requires lock + // Try atomic update + if (node.compareAndSetValueVolatile(currentVal, value)) { + return currentVal; // Lock-free success + } + break; // CAS failed, need lock + } + node = node.getNextVolatile(); // Volatile read + } + + // Case 4: Locking path + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + // Re-check state after lock + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; + } + + // Traverse again within lock + TableEntry prev = null; + node = head; + while (node != null) { + if (node.key == key) { + oldValue = node.getValuePlain(); // Plain read in lock + node.setValueVolatile(value); // Volatile write for visibility + sizeDelta = (oldValue == null) ? 1 : 0; // Adjust size if replacing placeholder + break table_loop; // Update done + } + prev = node; + node = node.getNextPlain(); // Plain read in lock + } + + // Key not found, add new node to end of chain + if (prev != null) { + TableEntry newNode = new TableEntry<>(key, value); + prev.setNextRelease(newNode); // Release write to link safely + sizeDelta = 1; + oldValue = null; + } else { + // Should not happen if head was non-null/non-marker. Retry. + continue table_loop; + } + } // End synchronized + break table_loop; // Operation completed within lock + } // End table_loop + + if (sizeDelta != 0) { + this.addSize(sizeDelta); + } + return oldValue; + } + + + /** + * If the specified key is not already associated with a value, associates + * it with the given value. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with the specified key, or + * {@code null} if there was no mapping for the key. 
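+ *
+ * <p>
+ * Unlike {@code computeIfAbsent}, the argument is constructed even when the key is already
+ * present, so prefer {@code computeIfAbsent} for expensive values. Sketch (names assumed):
+ * <pre>{@code
+ * if (holders.putIfAbsent(coordKey, holder) == null) {
+ *     // this thread won the race; holder is now the mapping
+ * }
+ * }</pre>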
+ * @throws NullPointerException if the specified value is null + */ + public V putIfAbsent(final long key, final V value) { + Validate.notNull(value, "Value may not be null"); + final int hash = getHash(key); + int sizeDelta = 0; + V existingValue = null; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) { currentTable = this.table; continue; } + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + // Case 1: Bin is empty + if (head == null) { + TableEntry newNode = new TableEntry<>(key, value); + if (compareAndExchangeAtIndexVolatile(currentTable, index, null, newNode) == null) { + this.addSize(1L); + return null; // Inserted + } + continue table_loop; // CAS failed, retry + } + + // Case 2: Resize marker + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Case 3: Lock-free check (optimistic) + TableEntry node = head; + while(node != null) { + if (node.key == key) { + existingValue = node.getValueVolatile(); // Volatile read + if (existingValue != null) { + return existingValue; // Key present with value + } + // Placeholder found, need lock + break; + } + node = node.getNextVolatile(); + } + + + // Case 4: Locking path + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // State changed, retry + } + + TableEntry prev = null; + node = head; + while (node != null) { + if (node.key == key) { + existingValue = node.getValuePlain(); // Plain read in lock + if (existingValue != null) { + break table_loop; // Return existing value + } else { + // Placeholder: update it + node.setValueVolatile(value); // Volatile write + sizeDelta = 1; + existingValue = null; // Return null as per contract + break table_loop; + } + } + prev = node; + node = node.getNextPlain(); // Plain read in lock + } + + // Key not found, add new node + if (prev != null) { + TableEntry newNode = new TableEntry<>(key, value); + prev.setNextRelease(newNode); // Release write + sizeDelta = 1; + existingValue = null; + } else { + continue table_loop; // Should not happen + } + } // End synchronized + break table_loop; + } // End table_loop + + if (sizeDelta != 0) { + this.addSize(sizeDelta); + } + return existingValue; + } + + /** + * Replaces the entry for a key only if currently mapped to some value. + * + * @param key key with which the specified value is associated + * @param value value to be associated with the specified key + * @return the previous value associated with the specified key, or + * {@code null} if there was no mapping for the key. 
+ * @throws NullPointerException if the specified value is null + */ + public V replace(final long key, final V value) { + Validate.notNull(value, "Value may not be null"); + final int hash = getHash(key); + V oldValue = null; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) return null; + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + if (head == null) return null; + + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Try Lock-Free Replace Attempt + TableEntry node = head; + while (node != null) { + if (node.key == key) { + do { // CAS retry loop + oldValue = node.getValueVolatile(); // Volatile read + if (oldValue == null) return null; // Cannot replace placeholder + + if (node.compareAndSetValueVolatile(oldValue, value)) { + return oldValue; // Lock-free success + } + // CAS failed, retry if key still matches + } while (node.key == key); + // Key changed or CAS keeps failing, fall back to lock + break; + } + node = node.getNextVolatile(); + } + + // Locking Path + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + node = head; + while (node != null) { + if (node.key == key) { + oldValue = node.getValuePlain(); // Plain read in lock + if (oldValue != null) { + node.setValueVolatile(value); // Volatile write + return oldValue; + } else { + return null; // Cannot replace placeholder + } + } + node = node.getNextPlain(); // Plain read in lock + } + } // End synchronized + + // Key not found after checks + return null; + } // End table_loop + } + + /** + * Replaces the entry for a key only if currently mapped to a given value. 
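+ *
+ * <p>
+ * Supports the usual optimistic read-modify-write loop (sketch; {@code transform} is an
+ * assumed pure function):
+ * <pre>{@code
+ * V cur;
+ * do {
+ *     cur = map.get(key);
+ *     if (cur == null) break; // nothing to update
+ * } while (!map.replace(key, cur, transform(cur)));
+ * }</pre>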
+ * + * @param key key with which the specified value is associated + * @param expect value expected to be associated with the specified key + * @param update value to be associated with the specified key + * @return {@code true} if the value was replaced + * @throws NullPointerException if {@code expect} or {@code update} is null + */ + public boolean replace(final long key, final V expect, final V update) { + Validate.notNull(expect, "Expected value may not be null"); + Validate.notNull(update, "Update value may not be null"); + final int hash = getHash(key); + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) return false; + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + if (head == null) return false; + + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Lock-Free CAS Attempt + TableEntry node = head; + while (node != null) { + if (node.key == key) { + V currentVal = node.getValueVolatile(); // Volatile read + if (!Objects.equals(currentVal, expect)) { + return false; // Value doesn't match + } + // Value matches, try CAS + if (node.compareAndSetValueVolatile(expect, update)) { + return true; // Lock-free success + } + // CAS failed, need lock + break; + } + node = node.getNextVolatile(); + } + + // Locking Path + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + node = head; + while (node != null) { + if (node.key == key) { + V currentVal = node.getValuePlain(); // Plain read in lock + if (Objects.equals(currentVal, expect)) { + node.setValueVolatile(update); // Volatile write + return true; // Replaced successfully + } else { + return false; // Value doesn't match + } + } + node = node.getNextPlain(); // Plain read in lock + } + } // End synchronized + + // Key not found + return false; + } // End table_loop + } + + /** + * Removes the mapping for a key from this map if it is present. + * + * @param key key whose mapping is to be removed from the map + * @return the previous value associated with {@code key}, or + * {@code null} if there was no mapping for {@code key} + */ + public V remove(final long key) { + final int hash = getHash(key); + int sizeDelta = 0; + V oldValue = null; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) return null; + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + if (head == null) return null; + + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Removal needs locking + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + + TableEntry prev = null; + TableEntry node = head; + while (node != null) { + if (node.key == key) { + oldValue = node.getValuePlain(); // Plain read in lock + sizeDelta = (oldValue != null) ? 
-1 : 0; // Decrement if actual mapping + + TableEntry next = node.getNextPlain(); // Plain read + // Update links with release semantics + if (prev == null) { + setAtIndexRelease(currentTable, index, next); // Removed head + } else { + prev.setNextRelease(next); // Removed middle/end + } + break table_loop; // Removed, exit loop + } + prev = node; + node = node.getNextPlain(); // Plain read + } + // Key not found in chain within lock + break table_loop; + } // End synchronized + } // End table_loop + + if (sizeDelta != 0) { + this.subSize(-sizeDelta); // subSize takes positive count + } + return oldValue; + } + + + /** + * Removes the entry for a key only if currently mapped to a given value. + * + * @param key key with which the specified value is associated + * @param expect value expected to be associated with the specified key + * @return {@code true} if the value was removed + */ + public boolean remove(final long key, final V expect) { + final int hash = getHash(key); + int sizeDelta = 0; + boolean removed = false; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) return false; + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + if (head == null) return false; + + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Removal needs locking + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + + TableEntry prev = null; + TableEntry node = head; + while (node != null) { + if (node.key == key) { + V currentVal = node.getValuePlain(); // Plain read in lock + if (Objects.equals(currentVal, expect)) { // Safe comparison + removed = true; + sizeDelta = (currentVal != null) ? -1 : 0; // Decrement if actual value + + TableEntry next = node.getNextPlain(); // Plain read + // Update links with release semantics + if (prev == null) { + setAtIndexRelease(currentTable, index, next); + } else { + prev.setNextRelease(next); + } + } else { + removed = false; // Value didn't match + } + break table_loop; // Key processed + } + prev = node; + node = node.getNextPlain(); // Plain read + } + // Key not found in chain within lock + break table_loop; + } // End synchronized + } // End table_loop + + if (sizeDelta != 0) { + this.subSize(-sizeDelta); + } + return removed; + } + + /** + * Removes the entry for the specified key only if its value satisfies the given predicate. + * + * @param key key whose mapping is to be removed from the map + * @param predicate the predicate to apply to the value associated with the key + * @return the value associated with the key before removal if the predicate was satisfied and the entry was removed, + * otherwise {@code null}. 
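+ *
+ * <p>
+ * The predicate is evaluated under the bin lock, making the test-and-remove atomic.
+ * Sketch (the holder API is assumed):
+ * <pre>{@code
+ * ChunkHolder removed = holders.removeIf(coordKey, holder -> holder.isStale());
+ * }</pre>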
+ * @throws NullPointerException if the specified predicate is null + */ + public V removeIf(final long key, final Predicate predicate) { + Validate.notNull(predicate, "Predicate may not be null"); + final int hash = getHash(key); + int sizeDelta = 0; + V oldValue = null; + boolean removed = false; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) return null; + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + if (head == null) return null; + + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Conditional removal needs locking + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + + TableEntry prev = null; + TableEntry node = head; + while (node != null) { + if (node.key == key) { + oldValue = node.getValuePlain(); // Plain read in lock + if (oldValue != null && predicate.test(oldValue)) { // Test non-null value + removed = true; + sizeDelta = -1; + + TableEntry next = node.getNextPlain(); // Plain read + // Update links with release semantics + if (prev == null) { + setAtIndexRelease(currentTable, index, next); + } else { + prev.setNextRelease(next); + } + } else { + removed = false; // Predicate failed or value null + } + break table_loop; // Key processed + } + prev = node; + node = node.getNextPlain(); // Plain read + } + // Key not found in chain within lock + break table_loop; + } // End synchronized + } // End table_loop + + if (sizeDelta != 0) { + this.subSize(-sizeDelta); + } + return removed ? oldValue : null; // Return old value only if removed + } + + // --- Compute Methods --- + + /** + * Attempts to compute a mapping for the specified key and its current mapped value + * (or {@code null} if there is no current mapping). The function is + * applied atomically. + * + * @param key key with which the specified value is to be associated + * @param function the function to compute a value + * @return the new value associated with the specified key, or null if none + * @throws NullPointerException if the specified function is null + */ + public V compute(final long key, final BiLong1Function function) { + Validate.notNull(function, "Function cannot be null"); + final int hash = getHash(key); + int sizeDelta = 0; + V finalValue = null; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) { currentTable = this.table; continue; } + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + // Case 1: Bin is empty. Use placeholder logic. 
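+ // Note on the placeholder technique below: the temporary entry is published into the
+ // bin via CAS only after the function has returned a non-null value, so no reader can
+ // ever observe a half-initialized mapping; if the CAS loses a race, the whole case is
+ // retried against the new bin head.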
+ if (head == null) { + TableEntry placeholder = new TableEntry<>(key, null); // Temp node + V computedValue; + synchronized (placeholder) { // Lock placeholder for atomicity + if (getAtIndexVolatile(currentTable, index) == null) { // Re-check bin + try { + computedValue = function.apply(key, null); // Compute with null old value + } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; } + + if (computedValue != null) { + placeholder.setValuePlain(computedValue); // Set value before CAS + // Attempt to insert the computed node + if (compareAndExchangeAtIndexVolatile(currentTable, index, null, placeholder) == null) { + sizeDelta = 1; + finalValue = computedValue; + break table_loop; // Success + } else { + continue table_loop; // CAS failed, retry + } + } else { + finalValue = null; // Computed null, no mapping + break table_loop; + } + } + } // End synchronized(placeholder) + continue table_loop; // Bin changed, retry + } // End Case 1 (head == null) + + // Case 2: Resize marker + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Case 3: Bin not empty. Lock head. + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + + TableEntry prev = null; + TableEntry node = head; + while (node != null) { + if (node.key == key) { + // Key found. Compute with existing value. + V oldValue = node.getValuePlain(); // Plain read in lock + V computedValue; + try { + computedValue = function.apply(key, oldValue); + } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; } + + if (computedValue != null) { + node.setValueVolatile(computedValue); // Update value (volatile write) + finalValue = computedValue; + sizeDelta = (oldValue == null) ? 1 : 0; // Size change if old was placeholder + } else { + // Remove mapping + finalValue = null; + sizeDelta = (oldValue != null) ? -1 : 0; // Size change only if old was value + TableEntry next = node.getNextPlain(); // Plain read + if (prev == null) setAtIndexRelease(currentTable, index, next); + else prev.setNextRelease(next); + } + break table_loop; // Done + } + prev = node; + node = node.getNextPlain(); // Plain read + } // End while + + // Key not found. Compute with null. + V computedValue; + try { + computedValue = function.apply(key, null); + } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; } + + if (computedValue != null) { + // Add new mapping + finalValue = computedValue; + sizeDelta = 1; + TableEntry newNode = new TableEntry<>(key, computedValue); + if (prev != null) prev.setNextRelease(newNode); // Release write + else { continue table_loop; } // Should not happen + } else { + finalValue = null; + sizeDelta = 0; + } + break table_loop; // Done + } // End synchronized(head) + } // End table_loop + + if (sizeDelta > 0) this.addSize(sizeDelta); + else if (sizeDelta < 0) this.subSize(-sizeDelta); + + return finalValue; + } + + /** + * If the specified key is not already associated with a value, attempts to + * compute its value using the given mapping function and enters it into + * this map unless {@code null}. 
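+ *
+ * <p>
+ * The canonical get-or-create sketch ({@code ChunkHolder} is an assumed type):
+ * <pre>{@code
+ * ChunkHolder holder = holders.computeIfAbsent(coordKey, key -> new ChunkHolder(key));
+ * }</pre>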
+ * + * @param key key with which the specified value is to be associated + * @param function the function to compute a value + * @return the current (existing or computed) value associated with the specified key, + * or null if the computed value is null + * @throws NullPointerException if the specified function is null + */ + public V computeIfAbsent(final long key, final LongFunction function) { + Validate.notNull(function, "Function cannot be null"); + final int hash = getHash(key); + int sizeDelta = 0; + V finalValue = null; + TableEntry[] currentTable = this.table; + + table_loop: + for(;;) { + final int tableLength = currentTable.length; + if (tableLength == 0) { currentTable = this.table; continue; } + + final int index = hash & (tableLength - 1); + TableEntry head = getAtIndexVolatile(currentTable, index); + + // Case 1: Bin is empty. Use placeholder. + if (head == null) { + TableEntry placeholder = new TableEntry<>(key, null); + V computedValue; + synchronized (placeholder) { + if (getAtIndexVolatile(currentTable, index) == null) { + try { + computedValue = function.apply(key); + } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; } + + if (computedValue != null) { + placeholder.setValuePlain(computedValue); + if (compareAndExchangeAtIndexVolatile(currentTable, index, null, placeholder) == null) { + sizeDelta = 1; + finalValue = computedValue; + break table_loop; // Inserted + } else { + continue table_loop; // CAS failed, retry + } + } else { + finalValue = null; // Computed null + break table_loop; + } + } + } // End synchronized(placeholder) + continue table_loop; // Bin changed, retry + } // End Case 1 + + // Case 2: Resize marker + if (head.isResizeMarker()) { + currentTable = helpResizeOrGetNextTable(currentTable, head); + continue table_loop; + } + + // Case 3: Lock-free check if key already exists with value + TableEntry node = head; + while (node != null) { + if (node.key == key) { + V existingValue = node.getValueVolatile(); // Volatile read + if (existingValue != null) { + return existingValue; // Already present + } + break; // Placeholder found, need lock + } + node = node.getNextVolatile(); + } + + // Case 4: Locking path + synchronized (head) { + TableEntry currentHead = getAtIndexVolatile(currentTable, index); + if (currentHead != head || head.isResizeMarker()) { + continue table_loop; // Retry + } + + TableEntry prev = null; + node = head; + while (node != null) { + if (node.key == key) { + V existingValue = node.getValuePlain(); // Plain read in lock + if (existingValue != null) { + finalValue = existingValue; // Found inside lock + } else { + // Placeholder exists, compute and update + V computedValue; + try { + computedValue = function.apply(key); + } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; } + + if (computedValue != null) { + node.setValueVolatile(computedValue); // Volatile write + sizeDelta = 1; + finalValue = computedValue; + } else { + finalValue = null; // Computed null + } + } + break table_loop; // Done + } + prev = node; + node = node.getNextPlain(); // Plain read + } // End while + + // Key not found. Compute and add. 
+                V computedValue;
+                try {
+                    computedValue = function.apply(key);
+                } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; }
+
+                if (computedValue != null) {
+                    finalValue = computedValue;
+                    sizeDelta = 1;
+                    TableEntry<V> newNode = new TableEntry<>(key, computedValue);
+                    if (prev != null) prev.setNextRelease(newNode); // Release write
+                    else { continue table_loop; } // Defensive: unreachable, since head was non-null
+                } else {
+                    finalValue = null;
+                    sizeDelta = 0;
+                }
+                break table_loop; // Done
+            } // End synchronized(head)
+        } // End table_loop
+
+        if (sizeDelta > 0) this.addSize(sizeDelta);
+        return finalValue;
+    }
+
+    /**
+     * If the value for the specified key is present, attempts to compute a new
+     * mapping given the key and its current mapped value.
+     *
+     * @param key key with which the specified value is to be associated
+     * @param function the function to compute a value
+     * @return the new value associated with the specified key, or null if none
+     * @throws NullPointerException if the specified function is null
+     */
+    public V computeIfPresent(final long key, final BiLong1Function<? super V, ? extends V> function) {
+        Validate.notNull(function, "Function cannot be null");
+        final int hash = getHash(key);
+        int sizeDelta = 0;
+        V finalValue = null;
+        TableEntry<V>[] currentTable = this.table;
+
+        table_loop:
+        for(;;) {
+            final int tableLength = currentTable.length;
+            if (tableLength == 0) return null;
+
+            final int index = hash & (tableLength - 1);
+            TableEntry<V> head = getAtIndexVolatile(currentTable, index);
+
+            if (head == null) return null;
+
+            if (head.isResizeMarker()) {
+                currentTable = helpResizeOrGetNextTable(currentTable, head);
+                continue table_loop;
+            }
+
+            // Needs lock for potential removal
+            synchronized (head) {
+                TableEntry<V> currentHead = getAtIndexVolatile(currentTable, index);
+                if (currentHead != head || head.isResizeMarker()) {
+                    continue table_loop; // Retry
+                }
+
+                TableEntry<V> prev = null;
+                TableEntry<V> node = head;
+                while (node != null) {
+                    if (node.key == key) {
+                        V oldValue = node.getValuePlain(); // Plain read in lock
+                        if (oldValue != null) { // Only compute if value present
+                            V computedValue;
+                            try {
+                                computedValue = function.apply(key, oldValue);
+                            } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; }
+
+                            if (computedValue != null) {
+                                node.setValueVolatile(computedValue); // Update (volatile write)
+                                finalValue = computedValue;
+                                sizeDelta = 0;
+                            } else {
+                                // Remove mapping
+                                finalValue = null;
+                                sizeDelta = -1;
+                                TableEntry<V> next = node.getNextPlain(); // Plain read
+                                if (prev == null) setAtIndexRelease(currentTable, index, next);
+                                else prev.setNextRelease(next);
+                            }
+                        } else {
+                            // Placeholder, treat as absent
+                            finalValue = null;
+                            sizeDelta = 0;
+                        }
+                        break table_loop; // Done
+                    }
+                    prev = node;
+                    node = node.getNextPlain(); // Plain read
+                } // End while
+
+                // Key not found
+                finalValue = null;
+                sizeDelta = 0;
+                break table_loop;
+            } // End synchronized(head)
+        } // End table_loop
+
+        if (sizeDelta < 0) this.subSize(-sizeDelta);
+        return finalValue;
+    }
+
+    /**
+     * If the specified key is not already associated with a value or is
+     * associated with null, associates it with the given non-null value.
+     * Otherwise, replaces the associated value with the results of the given
+     * remapping function, or removes if the result is {@code null}.
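+     * <p>
+     * A usage sketch ({@code counts} and {@code entityId} are illustrative
+     * placeholders, not part of this patch):
+     * <pre>{@code
+     * LeafConcurrentLong2ReferenceChainedHashTable<Integer> counts = ...;
+     * counts.merge(entityId, 1, Integer::sum); // insert 1, or add 1 to the stored count
+     * }</pre>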
+     *
+     * @param key key with which the resulting value is to be associated
+     * @param value the non-null value to be merged with the existing value
+     * @param function the function to recompute a value if present
+     * @return the new value associated with the specified key, or null if no
+     *         value is associated with the key
+     * @throws NullPointerException if the specified value or function is null
+     */
+    public V merge(final long key, final V value, final BiFunction<? super V, ? super V, ? extends V> function) {
+        Validate.notNull(value, "Value cannot be null");
+        Validate.notNull(function, "Function cannot be null");
+        final int hash = getHash(key);
+        int sizeDelta = 0;
+        V finalValue = null;
+        TableEntry<V>[] currentTable = this.table;
+
+        table_loop:
+        for(;;) {
+            final int tableLength = currentTable.length;
+            if (tableLength == 0) { currentTable = this.table; continue; }
+
+            final int index = hash & (tableLength - 1);
+            TableEntry<V> head = getAtIndexVolatile(currentTable, index);
+
+            // Case 1: Bin empty. Insert value.
+            if (head == null) {
+                TableEntry<V> newNode = new TableEntry<>(key, value);
+                if (compareAndExchangeAtIndexVolatile(currentTable, index, null, newNode) == null) {
+                    sizeDelta = 1;
+                    finalValue = value;
+                    break table_loop; // Inserted
+                }
+                continue table_loop; // CAS failed, retry
+            }
+
+            // Case 2: Resize marker
+            if (head.isResizeMarker()) {
+                currentTable = helpResizeOrGetNextTable(currentTable, head);
+                continue table_loop;
+            }
+
+            // Case 3: Lock head
+            synchronized (head) {
+                TableEntry<V> currentHead = getAtIndexVolatile(currentTable, index);
+                if (currentHead != head || head.isResizeMarker()) {
+                    continue table_loop; // Retry
+                }
+
+                TableEntry<V> prev = null;
+                TableEntry<V> node = head;
+                while (node != null) {
+                    if (node.key == key) {
+                        // Key found. Merge.
+                        V oldValue = node.getValuePlain(); // Plain read in lock
+                        V computedValue;
+                        if (oldValue != null) {
+                            try {
+                                computedValue = function.apply(oldValue, value); // Apply function
+                            } catch (Throwable t) { ThrowUtil.throwUnchecked(t); return null; }
+                        } else {
+                            computedValue = value; // Use provided value if old was placeholder
+                        }
+
+                        if (computedValue != null) {
+                            node.setValueVolatile(computedValue); // Update (volatile write)
+                            finalValue = computedValue;
+                            sizeDelta = (oldValue == null) ? 1 : 0; // Size change if old was placeholder
+                        } else {
+                            // Remove mapping
+                            finalValue = null;
+                            sizeDelta = (oldValue != null) ? -1 : 0; // Size change if old was value
+                            TableEntry<V> next = node.getNextPlain(); // Plain read
+                            if (prev == null) setAtIndexRelease(currentTable, index, next);
+                            else prev.setNextRelease(next);
+                        }
+                        break table_loop; // Done
+                    }
+                    prev = node;
+                    node = node.getNextPlain(); // Plain read
+                } // End while
+
+                // Key not found. Add provided value.
+                finalValue = value;
+                sizeDelta = 1;
+                TableEntry<V> newNode = new TableEntry<>(key, value);
+                if (prev != null) prev.setNextRelease(newNode); // Release write
+                else { continue table_loop; } // Defensive: unreachable, since head was non-null
+                break table_loop; // Done
+            } // End synchronized(head)
+        } // End table_loop
+
+        if (sizeDelta > 0) this.addSize(sizeDelta);
+        else if (sizeDelta < 0) this.subSize(-sizeDelta);
+
+        return finalValue;
+    }
+
+    /**
+     * Removes all of the mappings from this map.
+     * The map will be empty after this call returns.
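+     * <p>
+     * Bins are cleared one at a time under their head locks, so the operation is
+     * weakly consistent: mappings inserted concurrently with this call may
+     * survive it.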
+     */
+    public void clear() {
+        long removedCount = 0L;
+        TableEntry<V>[] currentTable = this.table; // Volatile read
+
+        for (int i = 0; i < currentTable.length; ++i) {
+            TableEntry<V> head = getAtIndexVolatile(currentTable, i);
+
+            if (head == null || head.isResizeMarker()) continue;
+
+            // Lock bin to clear
+            synchronized (head) {
+                TableEntry<V> currentHead = getAtIndexVolatile(currentTable, i);
+                // Re-check after lock
+                if (currentHead != head || head.isResizeMarker()) {
+                    continue; // Bin changed, skip
+                }
+
+                // Count actual mappings and clear bin
+                TableEntry<V> node = head;
+                while (node != null) {
+                    if (node.getValuePlain() != null) { // Count non-placeholders
+                        removedCount++;
+                    }
+                    node = node.getNextPlain(); // Plain read in lock
+                }
+                // Clear bin head with release semantics
+                setAtIndexRelease(currentTable, i, null);
+            } // End synchronized
+        } // End loop
+
+        if (removedCount > 0) {
+            this.subSize(removedCount);
+        }
+    }
+
+    // --- Iterators and Views ---
+
+    /** Returns an iterator over the map entries. */
+    public Iterator<TableEntry<V>> entryIterator() { return new EntryIterator<>(this); }
+
+    /** Returns an iterator over the map entries (implements Iterable). */
+    @Override public final Iterator<TableEntry<V>> iterator() { return this.entryIterator(); }
+
+    /** Returns an iterator over the keys. */
+    public PrimitiveIterator.OfLong keyIterator() { return new KeyIterator<>(this); }
+
+    /** Returns an iterator over the values. */
+    public Iterator<V> valueIterator() { return new ValueIterator<>(this); }
+
+    /**
+     * Returns a {@link Collection} view of the values contained in this map.
+     */
+    public Collection<V> values() {
+        Values<V> v = this.values;
+        return (v != null) ? v : (this.values = new Values<>(this));
+    }
+
+    /**
+     * Returns a {@link Set} view of the mappings contained in this map.
+     */
+    public Set<TableEntry<V>> entrySet() {
+        EntrySet<V> es = this.entrySet;
+        return (es != null) ? es : (this.entrySet = new EntrySet<>(this));
+    }
+
+    // --- Inner Classes: TableEntry, Iterators, Views ---
+
+    /**
+     * Represents a key-value mapping entry in the hash table.
+     * Also used as a resize marker.
+     */
+    public static final class TableEntry<V> {
+        static final VarHandle TABLE_ENTRY_ARRAY_HANDLE;
+        private static final VarHandle VALUE_HANDLE;
+        private static final VarHandle NEXT_HANDLE;
+
+        static {
+            try {
+                TABLE_ENTRY_ARRAY_HANDLE = ConcurrentUtil.getArrayHandle(TableEntry[].class);
+                VALUE_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "value", Object.class);
+                NEXT_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "next", TableEntry.class);
+            } catch (Throwable t) {
+                throw new Error("Failed to initialize TableEntry VarHandles", t);
+            }
+        }
+
+        final long key;
+        private volatile V value;
+        private volatile TableEntry<V> next;
+        private final boolean resizeMarker;
+
+        /** Constructor for regular map entries. */
+        TableEntry(final long key, final V value) {
+            this(key, value, false);
+        }
+
+        /** Constructor for potentially creating resize markers.
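+         * A marker is a sentinel published at a bin head during a resize; writers
+         * that observe one follow {@code helpResizeOrGetNextTable} to the new table
+         * instead of mutating the stale bin.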
+         */
+        TableEntry(final long key, final V value, final boolean resize) {
+            this.key = key;
+            this.resizeMarker = resize;
+            this.setValuePlain(value); // Initial plain set
+        }
+
+        public long getKey() { return this.key; }
+        public V getValue() { return getValueVolatile(); }
+
+        public V setValue(V newValue) {
+            throw new UnsupportedOperationException("Direct setValue on TableEntry is not supported; use map methods.");
+        }
+
+        @SuppressWarnings("unchecked") final V getValuePlain() { return (V) VALUE_HANDLE.get(this); }
+        @SuppressWarnings("unchecked") final V getValueAcquire() { return (V) VALUE_HANDLE.getAcquire(this); }
+        @SuppressWarnings("unchecked") final V getValueVolatile() { return (V) VALUE_HANDLE.getVolatile(this); }
+
+        final void setValuePlain(final V value) { VALUE_HANDLE.set(this, value); }
+        final void setValueRelease(final V value) { VALUE_HANDLE.setRelease(this, value); }
+        final void setValueVolatile(final V value) { VALUE_HANDLE.setVolatile(this, value); }
+
+        final boolean compareAndSetValueVolatile(final V expect, final V update) {
+            return VALUE_HANDLE.compareAndSet(this, expect, update);
+        }
+
+        @SuppressWarnings("unchecked") final TableEntry<V> getNextPlain() { return (TableEntry<V>) NEXT_HANDLE.get(this); }
+        @SuppressWarnings("unchecked") final TableEntry<V> getNextVolatile() { return (TableEntry<V>) NEXT_HANDLE.getVolatile(this); }
+
+        final void setNextPlain(final TableEntry<V> next) { NEXT_HANDLE.set(this, next); }
+        final void setNextRelease(final TableEntry<V> next) { NEXT_HANDLE.setRelease(this, next); }
+        final void setNextVolatile(final TableEntry<V> next) { NEXT_HANDLE.setVolatile(this, next); }
+
+        final boolean isResizeMarker() { return this.resizeMarker; }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (!(o instanceof LeafConcurrentLong2ReferenceChainedHashTable.TableEntry)) return false;
+            TableEntry<?> that = (TableEntry<?>) o;
+            return key == that.key && Objects.equals(getValueVolatile(), that.getValueVolatile()); // Use volatile read for value
+        }
+
+        @Override
+        public int hashCode() {
+            return Long.hashCode(key) ^ Objects.hashCode(getValueVolatile()); // Use volatile read for value
+        }
+
+        @Override
+        public String toString() {
+            return key + "=" + getValueVolatile(); // Use volatile read for value
+        }
+    }
+
+    /**
+     * Base class for traversing nodes, handling resizes.
+     * Note: This iterator implementation is simplified and might not be fully robust against
+     * rapid concurrent modifications during iteration, particularly multiple resize events.
+     * It aims for basic correctness in common scenarios.
+     */
+    protected static class NodeIterator<V> {
+        final LeafConcurrentLong2ReferenceChainedHashTable<V> map;
+        TableEntry<V>[] currentTable;
+        TableEntry<V> nextNode;
+        int nextTableIndex;
+        TableEntry<V> currentNodeInChain; // Current node within the chain being processed
+
+        NodeIterator(TableEntry<V>[] initialTable, LeafConcurrentLong2ReferenceChainedHashTable<V> map) {
+            this.map = map;
+            this.currentTable = initialTable; // Start with the table state at iterator creation
+            this.nextNode = null;
+            // Start iteration from the end of the table backwards
+            this.nextTableIndex = (initialTable == null || initialTable.length == 0) ? -1 : initialTable.length - 1;
+            this.currentNodeInChain = null;
+            advance(); // Find the first element
+        }
+
+        /**
+         * Advances to find the next valid node (non-null value, non-marker).
+         * Sets {@code nextNode}. Handles basic traversal and checks for table changes.
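+         * Iteration is weakly consistent: it never throws
+         * {@link java.util.ConcurrentModificationException}, and entries added or
+         * removed during iteration may or may not be reported.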
+         */
+        final void advance() {
+            nextNode = null; // Assume no next node initially
+
+            if (currentNodeInChain != null) {
+                currentNodeInChain = currentNodeInChain.getNextVolatile(); // Move to next in chain
+            }
+
+            while (nextNode == null) {
+                if (currentNodeInChain != null) {
+                    // Check if the node is valid (not marker, has value)
+                    if (!currentNodeInChain.isResizeMarker() && currentNodeInChain.getValueVolatile() != null) {
+                        nextNode = currentNodeInChain; // Found a valid node
+                        return; // Exit advance
+                    }
+                    // Node invalid (marker or placeholder), move to the next
+                    currentNodeInChain = currentNodeInChain.getNextVolatile();
+                    continue; // Check next node in chain
+                }
+
+                if (nextTableIndex < 0) {
+                    // Check if the underlying table reference changed (indicates resize)
+                    // This is a simplified check; robust iterators might need more complex resize handling
+                    if (this.currentTable != map.table) {
+                        // Table changed, restart iteration from the new table
+                        this.currentTable = map.table;
+                        this.nextTableIndex = (this.currentTable == null || this.currentTable.length == 0) ? -1 : this.currentTable.length - 1;
+                        this.currentNodeInChain = null;
+                        // Retry finding a node from the beginning of the new table
+                        continue;
+                    }
+                    // No table change and all bins checked
+                    return; // Exhausted
+                }
+
+                if (this.currentTable != null && this.nextTableIndex < this.currentTable.length) {
+                    TableEntry<V> head = getAtIndexVolatile(this.currentTable, this.nextTableIndex--); // Read head and decrement index
+
+                    if (head != null && !head.isResizeMarker()) {
+                        // Start traversing this new chain
+                        currentNodeInChain = head;
+                        // Check if the head itself is a valid node
+                        if (currentNodeInChain.getValueVolatile() != null) {
+                            nextNode = currentNodeInChain;
+                            return; // Found valid node (head of bin)
+                        }
+                        // Head is placeholder, continue loop to check next in chain
+                        continue;
+                    }
+                    // Bin was empty or head was marker. Reset chain traversal.
+                    currentNodeInChain = null;
+                } else {
+                    // Table became null or index out of bounds (shouldn't happen unless table shrinks drastically)
+                    // Force moving to next index to avoid infinite loop
+                    nextTableIndex--;
+                    currentNodeInChain = null;
+                    // Consider checking map.table again here for robustness
+                    if (this.currentTable != map.table) {
+                        // Restart if table changed
+                        this.currentTable = map.table;
+                        this.nextTableIndex = (this.currentTable == null || this.currentTable.length == 0) ? -1 : this.currentTable.length - 1;
+                        continue;
+                    }
+                }
+            } // End while (nextNode == null)
+        }
+
+        public final boolean hasNext() {
+            return this.nextNode != null;
+        }
+
+        /** Internal method to get the next node and advance. */
+        final TableEntry<V> findNext() {
+            TableEntry<V> e = this.nextNode;
+            if (e == null) {
+                return null; // Signifies end for internal use
+            }
+            advance(); // Prepare for the *next* call
+            return e; // Return the previously found node
+        }
+    }
+
+    /**
+     * Base class for concrete iterators (Entry, Key, Value).
+     * Handles remove() and NoSuchElementException.
+     */
+    protected static abstract class BaseIteratorImpl<T, V> extends NodeIterator<V> implements Iterator<T> {
+        protected TableEntry<V> lastReturned; // Node returned by last next() call
+
+        protected BaseIteratorImpl(final LeafConcurrentLong2ReferenceChainedHashTable<V> map) {
+            super(map.table, map); // Initialize NodeIterator
+            this.lastReturned = null;
+        }
+
+        /** Gets the next node, updates lastReturned, advances iterator.
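+         *
+         * @throws NoSuchElementException if no further entries are available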
+         */
+        protected final TableEntry<V> nextNode() throws NoSuchElementException {
+            TableEntry<V> node = this.nextNode; // Node pre-fetched by advance()
+            if (node == null) {
+                throw new NoSuchElementException();
+            }
+            this.lastReturned = node; // Store for remove()
+            advance(); // Find the *next* node for the subsequent call
+            return node; // Return the current node
+        }
+
+        @Override
+        public void remove() {
+            TableEntry<V> last = this.lastReturned;
+            if (last == null) {
+                throw new IllegalStateException("next() not called or remove() already called");
+            }
+            this.map.remove(last.key); // Delegate removal to map's method
+            this.lastReturned = null; // Prevent double remove
+        }
+
+        @Override
+        public abstract T next() throws NoSuchElementException; // Must be implemented by subclass
+
+        @Override
+        public void forEachRemaining(final Consumer<? super T> action) {
+            Validate.notNull(action, "Action may not be null");
+            while (hasNext()) {
+                action.accept(next());
+            }
+        }
+    }
+
+    /** Iterator over map entries (TableEntry objects). */
+    protected static final class EntryIterator<V> extends BaseIteratorImpl<TableEntry<V>, V> {
+        EntryIterator(final LeafConcurrentLong2ReferenceChainedHashTable<V> map) { super(map); }
+
+        @Override public TableEntry<V> next() throws NoSuchElementException {
+            return nextNode();
+        }
+    }
+
+    /** Iterator over map keys (long primitives). */
+    protected static final class KeyIterator<V> extends BaseIteratorImpl<Long, V> implements PrimitiveIterator.OfLong {
+        KeyIterator(final LeafConcurrentLong2ReferenceChainedHashTable<V> map) { super(map); }
+
+        @Override public long nextLong() throws NoSuchElementException {
+            return nextNode().key;
+        }
+
+        @Override public Long next() throws NoSuchElementException {
+            return nextLong(); // Autoboxing
+        }
+
+        @Override public void forEachRemaining(final LongConsumer action) {
+            Validate.notNull(action, "Action may not be null");
+            while (hasNext()) {
+                action.accept(nextLong());
+            }
+        }
+
+        @Override public void forEachRemaining(final Consumer<? super Long> action) {
+            if (action instanceof LongConsumer) {
+                forEachRemaining((LongConsumer) action);
+            } else {
+                Validate.notNull(action, "Action may not be null");
+                while (hasNext()) {
+                    action.accept(nextLong()); // Autoboxing
+                }
+            }
+        }
+    }
+
+    /** Iterator over map values. */
+    protected static final class ValueIterator<V> extends BaseIteratorImpl<V, V> {
+        ValueIterator(final LeafConcurrentLong2ReferenceChainedHashTable<V> map) { super(map); }
+
+        @Override public V next() throws NoSuchElementException {
+            return nextNode().getValueVolatile(); // Volatile read for value
+        }
+    }
+
+    // --- Collection Views ---
+
+    /** Base class for Collection views (Values, EntrySet).
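+     * Views are backed by the map: reads reflect concurrent updates, and the
+     * mutating operations (remove, removeAll, retainAll, removeIf, clear) write
+     * through to the map via its iterators.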
+     */
+    protected static abstract class BaseCollection<E, V> implements Collection<E> {
+        protected final LeafConcurrentLong2ReferenceChainedHashTable<V> map;
+
+        protected BaseCollection(LeafConcurrentLong2ReferenceChainedHashTable<V> map) {
+            this.map = Validate.notNull(map);
+        }
+
+        @Override public int size() { return map.size(); }
+        @Override public boolean isEmpty() { return map.isEmpty(); }
+        @Override public abstract boolean contains(Object o); // Subclass responsibility
+
+        @Override public boolean containsAll(Collection<?> c) {
+            Validate.notNull(c);
+            for (Object e : c) {
+                if (!contains(e)) return false;
+            }
+            return true;
+        }
+
+        @Override public Object[] toArray() {
+            List<E> list = new ArrayList<>(map.size());
+            for (E e : this) list.add(e); // Uses iterator() from subclass
+            return list.toArray();
+        }
+
+        @Override
+        public <T> T[] toArray(T[] a) {
+            Validate.notNull(a);
+            List<E> list = new ArrayList<>(map.size());
+            for (E e : this) list.add(e);
+            return list.toArray(a);
+        }
+
+        @Override public void clear() { map.clear(); }
+        @Override public boolean add(E e) { throw new UnsupportedOperationException(); }
+        @Override public boolean addAll(Collection<? extends E> c) { throw new UnsupportedOperationException(); }
+
+        @Override public boolean remove(Object o) {
+            Iterator<E> it = iterator(); // Subclass provides iterator
+            while (it.hasNext()) {
+                if (Objects.equals(o, it.next())) {
+                    it.remove(); // Use iterator's safe remove
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        @Override public boolean removeAll(Collection<?> c) {
+            Validate.notNull(c);
+            boolean modified = false;
+            Iterator<E> it = iterator();
+            while (it.hasNext()) {
+                if (c.contains(it.next())) {
+                    it.remove();
+                    modified = true;
+                }
+            }
+            return modified;
+        }
+
+        @Override public boolean retainAll(Collection<?> c) {
+            Validate.notNull(c);
+            boolean modified = false;
+            Iterator<E> it = iterator();
+            while (it.hasNext()) {
+                if (!c.contains(it.next())) {
+                    it.remove();
+                    modified = true;
+                }
+            }
+            return modified;
+        }
+
+        @Override public boolean removeIf(Predicate<? super E> filter) {
+            Validate.notNull(filter);
+            boolean removed = false;
+            Iterator<E> it = iterator();
+            while (it.hasNext()) {
+                if (filter.test(it.next())) {
+                    it.remove();
+                    removed = true;
+                }
+            }
+            return removed;
+        }
+
+        @Override public String toString() {
+            Iterator<E> it = iterator();
+            if (!it.hasNext()) return "[]";
+            StringBuilder sb = new StringBuilder("[");
+            for (;;) {
+                E e = it.next();
+                sb.append(e == this ? "(this Collection)" : e);
+                if (!it.hasNext()) return sb.append(']').toString();
+                sb.append(',').append(' ');
+            }
+        }
+
+        @Override public void forEach(Consumer<? super E> action) {
+            Validate.notNull(action);
+            for (E e : this) { // Uses iterator() from subclass
+                action.accept(e);
+            }
+        }
+    }
+
+    /** Collection view for the map's values. */
+    protected static final class Values<V> extends BaseCollection<V, V> {
+        Values(LeafConcurrentLong2ReferenceChainedHashTable<V> map) { super(map); }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public boolean contains(Object o) {
+            try {
+                return o != null && map.containsValue((V) o);
+            } catch (ClassCastException cce) { return false; }
+        }
+
+        @Override public Iterator<V> iterator() { return map.valueIterator(); }
+    }
+
+    /** Set view for the map's entries (TableEntry objects).
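+     * Elements are the map's live {@link TableEntry} nodes rather than snapshot
+     * copies, so reading an element's value always uses volatile semantics and
+     * reflects the current mapping.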
+     */
+    protected static final class EntrySet<V> extends BaseCollection<TableEntry<V>, V> implements Set<TableEntry<V>> {
+        EntrySet(LeafConcurrentLong2ReferenceChainedHashTable<V> map) { super(map); }
+
+        @Override public boolean contains(Object o) {
+            if (!(o instanceof LeafConcurrentLong2ReferenceChainedHashTable.TableEntry)) return false;
+            TableEntry<?> entry = (TableEntry<?>) o;
+            V mappedValue = map.get(entry.getKey()); // Concurrent read
+            // Use volatile read on entry's value for consistent comparison
+            return mappedValue != null && Objects.equals(mappedValue, entry.getValueVolatile());
+        }
+
+        @Override public Iterator<TableEntry<V>> iterator() { return map.entryIterator(); }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public boolean remove(Object o) {
+            if (!(o instanceof LeafConcurrentLong2ReferenceChainedHashTable.TableEntry)) return false;
+            TableEntry<?> entry = (TableEntry<?>) o;
+            try {
+                // Use map's atomic remove(key, value)
+                // Use volatile read for the expected value
+                return map.remove(entry.getKey(), (V) entry.getValueVolatile());
+            } catch (ClassCastException | NullPointerException ex) { // Handle potential type/null issues
+                return false;
+            }
+        }
+
+        @Override public int hashCode() {
+            int h = 0;
+            for (TableEntry<V> e : this) {
+                h += e.hashCode(); // Uses entry's hashCode
+            }
+            return h;
+        }
+
+        @Override public boolean equals(Object o) {
+            if (o == this) return true;
+            if (!(o instanceof Set)) return false;
+            Set<?> c = (Set<?>) o;
+            if (c.size() != size()) return false;
+            try {
+                // Relies on containsAll checking entry equality correctly
+                return containsAll(c);
+            } catch (ClassCastException | NullPointerException unused) {
+                return false;
+            }
+        }
+    }
+}