optimize data palette for mantle slices (https://github.com/VolmitSoftware/Iris.git)
@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
 public class IrisEngineSVC implements IrisService {
+    private static final int TRIM_PERIOD = 2_000;
     private final AtomicInteger tectonicLimit = new AtomicInteger(30);
     private final AtomicInteger tectonicPlates = new AtomicInteger();
     private final AtomicInteger queuedTectonicPlates = new AtomicInteger();
@@ -68,6 +69,7 @@ public class IrisEngineSVC implements IrisService {
         sender.sendMessage(C.DARK_PURPLE + "Status:");
         sender.sendMessage(C.DARK_PURPLE + "- Service: " + C.LIGHT_PURPLE + (service.isShutdown() ? "Shutdown" : "Running"));
         sender.sendMessage(C.DARK_PURPLE + "- Updater: " + C.LIGHT_PURPLE + (updateTicker.isAlive() ? "Running" : "Stopped"));
+        sender.sendMessage(C.DARK_PURPLE + "- Period: " + C.LIGHT_PURPLE + Form.duration(TRIM_PERIOD));
         sender.sendMessage(C.DARK_PURPLE + "- Trimmers: " + C.LIGHT_PURPLE + trimmerAlive.get());
         sender.sendMessage(C.DARK_PURPLE + "- Unloaders: " + C.LIGHT_PURPLE + unloaderAlive.get());
         sender.sendMessage(C.DARK_PURPLE + "Tectonic Plates:");
@@ -157,7 +159,7 @@ public class IrisEngineSVC implements IrisService {
     private final class Registered {
         private final String name;
         private final PlatformChunkGenerator access;
-        private final int offset = RNG.r.nextInt(1000);
+        private final int offset = RNG.r.nextInt(TRIM_PERIOD);
         private transient ScheduledFuture<?> trimmer;
         private transient ScheduledFuture<?> unloader;
         private transient boolean closed;
@@ -194,7 +196,7 @@ public class IrisEngineSVC implements IrisService {
                     Iris.error("EngineSVC: Failed to trim for " + name);
                     e.printStackTrace();
                 }
-            }, offset, 2000, TimeUnit.MILLISECONDS);
+            }, offset, TRIM_PERIOD, TimeUnit.MILLISECONDS);
         }
 
         if (unloader == null || unloader.isDone() || unloader.isCancelled()) {
@@ -214,7 +216,7 @@ public class IrisEngineSVC implements IrisService {
                     Iris.error("EngineSVC: Failed to unload for " + name);
                     e.printStackTrace();
                 }
-            }, offset + 1000, 2000, TimeUnit.MILLISECONDS);
+            }, offset + 1000, TRIM_PERIOD, TimeUnit.MILLISECONDS);
         }
     }
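The IrisEngineSVC part of the commit is mostly bookkeeping: the hard-coded 2000 ms interval becomes the TRIM_PERIOD constant, and each registered world keeps a random offset below that period so its trimmer and unloader do not fire in the same tick as every other world's. A minimal stand-alone sketch of that scheduling idea, assuming a plain ScheduledExecutorService (the TrimScheduler class and its names are hypothetical, not Iris API):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: one periodic trim task per world, each started with a
// random initial delay below TRIM_PERIOD so the tasks are spread across the
// period instead of all firing at once.
public final class TrimScheduler {
    private static final int TRIM_PERIOD = 2_000; // milliseconds, mirroring the constant in the diff

    private final ScheduledExecutorService service = Executors.newScheduledThreadPool(2);

    public ScheduledFuture<?> scheduleTrim(String worldName, Runnable trimTask) {
        int offset = ThreadLocalRandom.current().nextInt(TRIM_PERIOD);
        return service.scheduleAtFixedRate(() -> {
            try {
                trimTask.run();
            } catch (Throwable e) {
                // mirrors the diff: log and keep the periodic task alive
                System.err.println("Failed to trim for " + worldName);
                e.printStackTrace();
            }
        }, offset, TRIM_PERIOD, TimeUnit.MILLISECONDS);
    }

    public void shutdown() {
        service.shutdownNow();
    }
}

The unloader in the diff reuses the same offset shifted by 1000 ms, which interleaves the two task families instead of running them back to back.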
@@ -19,21 +19,30 @@
 package com.volmit.iris.util.hunk.bits;
 
 import com.volmit.iris.util.data.Varint;
-import lombok.Synchronized;
+import it.unimi.dsi.fastutil.ints.*;
 
 import java.io.*;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 public class DataContainer<T> {
+    private static final boolean TRIM = Boolean.getBoolean("iris.trim-palette");
     protected static final int INITIAL_BITS = 3;
     protected static final int LINEAR_BITS_LIMIT = 4;
     protected static final int LINEAR_INITIAL_LENGTH = (int) Math.pow(2, LINEAR_BITS_LIMIT) + 1;
     protected static final int[] BIT = computeBitLimits();
+    private final Lock read, write;
 
     private volatile Palette<T> palette;
     private volatile DataBits data;
     private final int length;
     private final Writable<T> writer;
 
     public DataContainer(Writable<T> writer, int length) {
+        var lock = new ReentrantReadWriteLock();
+        this.read = lock.readLock();
+        this.write = lock.writeLock();
+
         this.writer = writer;
         this.length = length;
         this.data = new DataBits(INITIAL_BITS, length);
@@ -41,10 +50,15 @@ public class DataContainer<T> {
     }
 
     public DataContainer(DataInputStream din, Writable<T> writer) throws IOException {
+        var lock = new ReentrantReadWriteLock();
+        this.read = lock.readLock();
+        this.write = lock.writeLock();
+
         this.writer = writer;
         this.length = Varint.readUnsignedVarInt(din);
         this.palette = newPalette(din);
         this.data = new DataBits(palette.bits(), length, din);
+        trim();
     }
 
     private static int[] computeBitLimits() {
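DataContainer keeps a DataBits word array whose width follows the palette: a palette with n entries needs ceil(log2(n)) bits per slot, with INITIAL_BITS as the floor, and palettes narrow enough to fit in LINEAR_BITS_LIMIT stay in the array-backed LinearPalette. A hedged sketch of that sizing rule (the helper below is illustrative; the real class precomputes a BIT table via computeBitLimits()):

// Hypothetical helper mirroring the idea behind BIT/computeBitLimits():
// the number of bits needed to index a palette with `size` distinct entries.
public final class PaletteBits {
    public static final int INITIAL_BITS = 3;       // assumed minimum, as in the diff
    public static final int LINEAR_BITS_LIMIT = 4;  // small palettes stay in the linear (array) palette

    public static int bitsFor(int size) {
        // 32 - numberOfLeadingZeros(size - 1) == ceil(log2(size)) for size >= 2
        int bits = size <= 1 ? 1 : 32 - Integer.numberOfLeadingZeros(size - 1);
        return Math.max(bits, INITIAL_BITS);
    }

    public static boolean useLinearPalette(int bits) {
        return bits <= LINEAR_BITS_LIMIT;
    }

    public static void main(String[] args) {
        System.out.println(bitsFor(5));   // 3
        System.out.println(bitsFor(17));  // 5
        System.out.println(bitsFor(33));  // 6
    }
}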
@@ -86,13 +100,18 @@ public class DataContainer<T> {
         writeDos(new DataOutputStream(out));
     }
 
-    @Synchronized
     public void writeDos(DataOutputStream dos) throws IOException {
-        Varint.writeUnsignedVarInt(length, dos);
-        Varint.writeUnsignedVarInt(palette.size(), dos);
-        palette.iterateIO((data, __) -> writer.writeNodeData(dos, data));
-        data.write(dos);
-        dos.flush();
+        write.lock();
+        try {
+            trim();
+            Varint.writeUnsignedVarInt(length, dos);
+            Varint.writeUnsignedVarInt(palette.size(), dos);
+            palette.iterateIO((data, __) -> writer.writeNodeData(dos, data));
+            data.write(dos);
+            dos.flush();
+        } finally {
+            write.unlock();
+        }
     }
 
     private Palette<T> newPalette(DataInputStream din) throws IOException {
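writeDos() now serializes under the write lock and trims first, so a concurrent set() cannot swap the palette or the bit array halfway through the stream, and the on-disk palette never carries unused entries. A rough sketch of that "compact, then serialize, all under one write lock" shape (LockedSerializer and its fields are illustrative, not the Iris types):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative only: any compaction and the full serialization happen inside
// one write-lock critical section, so readers never observe a half-written state.
public final class LockedSerializer {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private int[] data = {1, 0, 2, 2, 0, 1};

    private void compact() {
        // stand-in for trim(): mutation of shared state happens under the write lock
    }

    public byte[] write() throws IOException {
        lock.writeLock().lock();
        try {
            compact();
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream dos = new DataOutputStream(bytes)) {
                dos.writeInt(data.length);
                for (int value : data) {
                    dos.writeInt(value);
                }
            }
            return bytes.toByteArray();
        } finally {
            lock.writeLock().unlock();
        }
    }
}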
@@ -110,25 +129,38 @@ public class DataContainer<T> {
         return new HashPalette<>();
     }
 
-    @Synchronized
     public void set(int position, T t) {
-        int id = palette.id(t);
+        int id;
 
-        if (id == -1) {
-            id = palette.add(t);
-            updateBits();
+        read.lock();
+        try {
+            id = palette.id(t);
+            if (id == -1) {
+                id = palette.add(t);
+                if (palette.bits() == data.getBits()) {
+                    data.set(position, id);
+                    return;
+                }
+            }
+        } finally {
+            read.unlock();
         }
 
-        data.set(position, id);
+        write.lock();
+        try {
+            updateBits();
+            data.set(position, id);
+        } finally {
+            write.unlock();
+        }
     }
 
-    @Synchronized
     @SuppressWarnings("NonAtomicOperationOnVolatileField")
     private void updateBits() {
-        if (palette.bits() == data.getBits())
+        int bits = palette.bits();
+        if (bits == data.getBits())
             return;
 
-        int bits = palette.bits();
         if (data.getBits() <= LINEAR_BITS_LIMIT != bits <= LINEAR_BITS_LIMIT) {
             palette = newPalette(bits).from(palette);
         }
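The rewritten set() is a fast-path/slow-path split: under the cheap read lock it looks up (or adds) the palette id and, if the palette's bit width still matches the backing DataBits, writes immediately; only when the width has to grow does it take the write lock, widen the storage via updateBits(), and then write. This works because the palette implementations are internally thread-safe, so adding an entry under the read lock is allowed. A simplified stand-alone sketch of the pattern (string values, a plain int[], and the field names are stand-ins, not the real Palette and DataBits types):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of the fast-path/slow-path split: the palette itself is thread-safe,
// so new entries can be added under the read lock; the write lock is only
// taken when the bit width changes and the backing storage must be adjusted.
public final class PaletteWriteSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final ConcurrentHashMap<String, Integer> palette = new ConcurrentHashMap<>();
    private final AtomicInteger nextId = new AtomicInteger(1); // id 0 reserved for "empty"
    private volatile int bits = 3;
    private volatile int[] storage = new int[64];

    public void set(int position, String value) {
        int id;
        lock.readLock().lock();
        try {
            id = palette.computeIfAbsent(value, v -> nextId.getAndIncrement());
            if (bitsFor(nextId.get()) == bits) {
                storage[position] = id;          // fast path: width unchanged
                return;
            }
        } finally {
            lock.readLock().unlock();
        }

        lock.writeLock().lock();
        try {
            bits = bitsFor(nextId.get());        // stand-in for data.setBits(bits)
            storage[position] = id;              // slow path: write after widening
        } finally {
            lock.writeLock().unlock();
        }
    }

    private static int bitsFor(int idCount) {
        // bits needed to represent ids 0..idCount-1, floor of 3 as in the diff
        return Math.max(3, 32 - Integer.numberOfLeadingZeros(Math.max(1, idCount - 1)));
    }
}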
@@ -136,18 +168,44 @@ public class DataContainer<T> {
         data = data.setBits(bits);
     }
 
-    @Synchronized
     public T get(int position) {
-        int id = data.get(position);
+        read.lock();
+        try {
+            int id = data.get(position);
 
-        if (id <= 0) {
-            return null;
+            if (id <= 0) {
+                return null;
+            }
+
+            return palette.get(id);
+        } finally {
+            read.unlock();
         }
-
-        return palette.get(id);
     }
 
     public int size() {
         return data.getSize();
     }
 
+    private void trim() {
+        var ints = new Int2IntRBTreeMap();
+        for (int i = 0; i < length; i++) {
+            int x = data.get(i);
+            if (x <= 0) continue;
+            ints.put(x, x);
+        }
+        if (ints.size() == palette.size())
+            return;
+
+        int bits = bits(ints.size() + 1);
+        var trimmed = newPalette(bits);
+        ints.replaceAll((k, v) -> trimmed.add(palette.get(k)));
+        var tBits = new DataBits(bits, length);
+        for (int i = 0; i < length; i++) {
+            tBits.set(i, ints.get(data.get(i)));
+        }
+
+        data = tBits;
+        palette = trimmed;
+    }
 }
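The new trim() is the heart of the optimization: it scans the data once, keeps only the palette ids that are actually referenced, builds a smaller palette, and rewrites the data with densely remapped ids, which usually also lets the bit width shrink. A self-contained sketch of the same compaction over plain arrays (a LinkedHashMap stands in for the fastutil Int2IntRBTreeMap used in the diff):

import java.util.LinkedHashMap;
import java.util.Map;

// Compaction sketch: keep only palette entries that are still referenced,
// remap the stored ids densely, and rewrite the data to match.
public final class PaletteCompaction {

    public static void main(String[] args) {
        String[] palette = {null, "air", "stone", "dirt", "gold"}; // id 0 = empty
        int[] data = {0, 2, 2, 0, 4, 2};                            // only "stone" and "gold" used

        // 1) collect used ids (old id -> new id, filled in below)
        Map<Integer, Integer> remap = new LinkedHashMap<>();
        for (int id : data) {
            if (id > 0) remap.putIfAbsent(id, 0);
        }

        // 2) build the trimmed palette and assign dense new ids
        String[] trimmed = new String[remap.size() + 1];
        int next = 1;
        for (Map.Entry<Integer, Integer> e : remap.entrySet()) {
            trimmed[next] = palette[e.getKey()];
            e.setValue(next++);
        }

        // 3) rewrite the data with the remapped ids
        int[] compacted = new int[data.length];
        for (int i = 0; i < data.length; i++) {
            compacted[i] = data[i] == 0 ? 0 : remap.get(data[i]);
        }

        System.out.println(java.util.Arrays.toString(trimmed));    // [null, stone, gold]
        System.out.println(java.util.Arrays.toString(compacted));  // [0, 1, 1, 0, 2, 1]
    }
}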
@@ -27,13 +27,14 @@ import java.util.LinkedHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
 
 public class HashPalette<T> implements Palette<T> {
-    private final LinkedHashMap<T, Integer> palette;
+    private final Object lock = new Object();
+    private final KMap<T, Integer> palette;
     private final KMap<Integer, T> lookup;
     private final AtomicInteger size;
 
     public HashPalette() {
         this.size = new AtomicInteger(1);
-        this.palette = new LinkedHashMap<>();
+        this.palette = new KMap<>();
         this.lookup = new KMap<>();
     }
@@ -52,13 +53,13 @@ public class HashPalette<T> implements Palette<T> {
             return 0;
         }
 
-        synchronized (palette) {
-            return palette.computeIfAbsent(t, $ -> {
+        return palette.computeIfAbsent(t, $ -> {
+            synchronized (lock) {
                 int index = size.getAndIncrement();
                 lookup.put(index, t);
                 return index;
-            });
-        }
+            }
+        });
     }
 
     @Override
@@ -78,7 +79,7 @@ public class HashPalette<T> implements Palette<T> {
 
     @Override
     public void iterate(Consumer2<T, Integer> c) {
-        synchronized (palette) {
+        synchronized (lock) {
             for (int i = 1; i < size.get(); i++) {
                 c.accept(lookup.get(i), i);
             }
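HashPalette now keeps the forward map (value to id) in a concurrent KMap and moves synchronization onto a dedicated lock object that only guards id allocation, so the counter increment and the reverse-lookup insert always happen together while plain reads stay lock-free. A rough stand-alone equivalent, with ConcurrentHashMap standing in for Iris's KMap:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Rough equivalent of the HashPalette change: forward and reverse maps,
// with a private lock so "allocate id + register reverse lookup" is atomic.
public final class TwoWayPalette<T> {
    private final Object lock = new Object();
    private final ConcurrentHashMap<T, Integer> palette = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<Integer, T> lookup = new ConcurrentHashMap<>();
    private final AtomicInteger size = new AtomicInteger(1); // id 0 is reserved for "empty"

    public int add(T t) {
        if (t == null) {
            return 0;
        }
        return palette.computeIfAbsent(t, $ -> {
            synchronized (lock) {
                int index = size.getAndIncrement();
                lookup.put(index, t);
                return index;
            }
        });
    }

    public T get(int id) {
        return lookup.get(id);
    }
}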
@@ -45,25 +45,23 @@ public class LinearPalette<T> implements Palette<T> {
 
     @Override
     public int add(T t) {
         if (t == null) {
             return 0;
         }
         int index = size.getAndIncrement();
-        grow(index + 1);
+        if (palette.length() <= index)
+            grow(index);
         palette.set(index, t);
         return index;
     }
 
     private synchronized void grow(int newLength) {
-        if (newLength > palette.length()) {
-            AtomicReferenceArray<T> a = new AtomicReferenceArray<>(newLength);
-
-            for (int i = 0; i < palette.length(); i++) {
-                a.set(i, palette.get(i));
-            }
-
-            palette = a;
-        }
+        if (palette.length() <= newLength)
+            return;
+
+        AtomicReferenceArray<T> a = new AtomicReferenceArray<>(newLength + 1);
+        for (int i = 0; i < palette.length(); i++) {
+            a.set(i, palette.get(i));
+        }
+
+        palette = a;
     }
 
     @Override
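LinearPalette is backed by an AtomicReferenceArray, which has a fixed length, so growth means allocating a larger array, copying the old slots, and swapping the field; the add() path now checks the capacity before calling grow() instead of calling it unconditionally. A hedged sketch of that copy-and-swap growth (simplified: add() is fully synchronized here, unlike the Iris class, and the doubling policy is an assumption):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Sketch of copy-and-swap growth for an append-only palette backed by
// an AtomicReferenceArray (whose length is fixed once created).
public final class GrowablePalette<T> {
    private final AtomicInteger size = new AtomicInteger(1); // id 0 reserved
    private volatile AtomicReferenceArray<T> palette = new AtomicReferenceArray<>(16);

    public synchronized int add(T value) {
        int index = size.getAndIncrement();
        if (palette.length() <= index) {
            grow(index);
        }
        palette.set(index, value);
        return index;
    }

    private void grow(int minIndex) {
        // allocate enough room for minIndex, copy the old slots, swap the reference
        AtomicReferenceArray<T> bigger =
                new AtomicReferenceArray<>(Math.max(minIndex + 1, palette.length() * 2));
        for (int i = 0; i < palette.length(); i++) {
            bigger.set(i, palette.get(i));
        }
        palette = bigger;
    }

    public T get(int index) {
        AtomicReferenceArray<T> p = palette;
        return index >= 0 && index < p.length() ? p.get(index) : null;
    }
}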
@@ -999,7 +999,7 @@ public class IrisInterpolation {
 
     public static double getNoise(InterpolationMethod method, int x, int z, double h, NoiseProvider noise) {
         HashMap<NoiseKey, Double> cache = new HashMap<>(64);
-        NoiseProvider n = (x1, z1) -> cache.computeIfAbsent(new NoiseKey(x1, z1), k -> noise.noise(k.x, k.z));
+        NoiseProvider n = (x1, z1) -> cache.computeIfAbsent(new NoiseKey(x1 - x, z1 - z), k -> noise.noise(x1, z1));
 
         if (method.equals(InterpolationMethod.BILINEAR)) {
             return getBilinearNoise(x, z, h, n);
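The interpolation change keys the per-call memo on the offset from the query point (x1 - x, z1 - z) rather than on absolute world coordinates, while the wrapped provider is still sampled at the absolute position. A small sketch of a memoizing NoiseProvider in that style (the NoiseProvider interface and NoiseKey record below are hypothetical stand-ins, not the Iris types):

import java.util.HashMap;
import java.util.Map;

public final class CachedNoiseExample {
    // Hypothetical stand-ins for Iris's NoiseProvider / NoiseKey.
    interface NoiseProvider {
        double noise(double x, double z);
    }

    record NoiseKey(double x, double z) {}

    // Wraps a provider with a per-call cache keyed by the offset from (originX, originZ),
    // while the wrapped provider is still sampled at the absolute coordinates.
    static NoiseProvider cached(NoiseProvider noise, double originX, double originZ) {
        Map<NoiseKey, Double> cache = new HashMap<>(64);
        return (x1, z1) -> cache.computeIfAbsent(
                new NoiseKey(x1 - originX, z1 - originZ),
                k -> noise.noise(x1, z1));
    }

    public static void main(String[] args) {
        NoiseProvider raw = (x, z) -> Math.sin(x * 0.1) * Math.cos(z * 0.1);
        NoiseProvider fast = cached(raw, 100, 200);
        System.out.println(fast.noise(104, 203)); // computed once...
        System.out.println(fast.noise(104, 203)); // ...then served from the cache
    }
}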