Mirror of https://github.com/VolmitSoftware/Iris.git (synced 2025-12-31 04:46:40 +00:00)

Commit: sync and fixes
@@ -44,6 +44,8 @@ public class IrisSettings {
    private IrisSettingsPerformance performance = new IrisSettingsPerformance();

    public static int getThreadCount(int c) {
        if (System.getProperty("os.name").toLowerCase().contains("win"))
            return Runtime.getRuntime().availableProcessors();
        return switch (c) {
            case -1, -2, -4 -> Runtime.getRuntime().availableProcessors() / -c;
            case 0, 1, 2 -> 1;
@@ -132,6 +134,7 @@ public class IrisSettings {
    @Data
    public static class IrisSettingsConcurrency {
        public int parallelism = -1;
        public boolean windowsFullPerformance = true;
    }

    @Data
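A note on the concurrency hunks above: getThreadCount(c) turns the configured parallelism value into a real thread count, and the new Windows branch returns the full processor count up front (presumably gated by the new windowsFullPerformance flag, although that condition is not visible in the hunk shown). Below is a standalone sketch of the mapping as it reads from the switch; the class name, main method and default case are illustrative assumptions, not part of IrisSettings.

    public class ThreadCountSketch {
        static int resolve(int parallelism, boolean onWindows) {
            int cores = Runtime.getRuntime().availableProcessors();
            if (onWindows) {
                return cores; // new behaviour: Windows always gets every available core
            }
            return switch (parallelism) {
                case -1, -2, -4 -> cores / -parallelism; // negative values divide the core count
                case 0, 1, 2 -> 1;                       // small values collapse to a single thread
                default -> parallelism;                  // assumption: other positive values pass through as-is
            };
        }

        public static void main(String[] args) {
            System.out.println(resolve(-1, false)); // all cores
            System.out.println(resolve(-2, false)); // half the cores
            System.out.println(resolve(2, false));  // 1
        }
    }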
@@ -26,6 +26,7 @@ import com.volmit.iris.core.nms.datapack.DataVersion;
import com.volmit.iris.core.service.IrisEngineSVC;
import com.volmit.iris.core.tools.IrisPackBenchmarking;
import com.volmit.iris.core.tools.IrisToolbelt;
import com.volmit.iris.core.tools.IrisWorldAnalytics;
import com.volmit.iris.engine.framework.Engine;
import com.volmit.iris.engine.object.IrisDimension;
import com.volmit.iris.util.decree.DecreeExecutor;
@@ -169,6 +170,7 @@ public class CommandDeveloper implements DecreeExecutor {
            File[] McaFiles = new File(world, "region").listFiles((dir, name) -> name.endsWith(".mca"));
            for (File mca : McaFiles) {
                MCAFile MCARegion = MCAUtil.read(mca);
                int i = 0;
            }
        } catch (Exception e) {
            e.printStackTrace();
@@ -176,6 +178,19 @@ public class CommandDeveloper implements DecreeExecutor {

    }

    @Decree(description = "test")
    public void anl(
            @Param(description = "String") String world) {
        try {
            IrisWorldAnalytics a = new IrisWorldAnalytics(world);
            a.execute();
        } catch (Exception e) {
            e.printStackTrace();
        }

    }


    @Decree(description = "UnloadChunks for good reasons.")
    public void unloadchunks() {
        List<World> IrisWorlds = new ArrayList<>();
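The new anl decree is a thin wrapper around the analyser added below: it takes a world folder name, constructs IrisWorldAnalytics (which indexes the region files from its constructor) and calls execute(). A minimal direct-usage sketch, with a hypothetical world folder name:

    // "myworld" is a placeholder; the analyser looks for a "region" directory under it.
    IrisWorldAnalytics analytics = new IrisWorldAnalytics("myworld");
    analytics.execute(); // as committed, execute() only logs a start message so far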
@@ -0,0 +1,111 @@
package com.volmit.iris.core.tools;

import com.volmit.iris.Iris;
import com.volmit.iris.util.format.Form;
import com.volmit.iris.util.math.M;
import com.volmit.iris.util.math.RollingSequence;
import com.volmit.iris.util.nbt.mca.MCAFile;
import com.volmit.iris.util.nbt.mca.MCAUtil;
import com.volmit.iris.util.parallel.BurstExecutor;
import com.volmit.iris.util.parallel.MultiBurst;
import com.volmit.iris.util.scheduling.ChronoLatch;
import com.volmit.iris.util.scheduling.Looper;

import java.io.File;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

public class IrisWorldAnalytics {
    private final ChronoLatch latch;
    private final String world;
    private final AtomicInteger totalChunks;
    private final AtomicInteger processed;
    private final RollingSequence chunksPerSecond;
    private final AtomicLong startTime;
    private final Looper ticker;

    public IrisWorldAnalytics(String world) {
        this.world = world;

        totalChunks = new AtomicInteger();
        processed = new AtomicInteger(0);
        latch = new ChronoLatch(3000);
        chunksPerSecond = new RollingSequence(3000);
        startTime = new AtomicLong(M.ms());
        index();
        ticker = new Looper() {
            @Override
            protected long loop() {
                return 1000;
            }
        };
    }

    public void execute() {
        Iris.info("Starting world analyser..");
        long startTime = System.currentTimeMillis();
    }

    private long computeETA() {
        return (long) (totalChunks.get() > 1024 ? // Generated chunks exceed 1/8th of total?
                // If yes, use smooth function (which gets more accurate over time since its less sensitive to outliers)
                ((totalChunks.get() - processed.get()) * ((double) (M.ms() - startTime.get()) / (double) processed.get())) :
                // If no, use quick function (which is less accurate over time but responds better to the initial delay)
                ((totalChunks.get() - processed.get()) / chunksPerSecond.getAverage()) * 1000
        );
    }

    private void index() {
        try {
            AtomicInteger chunks = new AtomicInteger();
            AtomicInteger pr = new AtomicInteger();
            AtomicInteger pl = new AtomicInteger(0);
            RollingSequence rps = new RollingSequence(5);
            ChronoLatch cl = new ChronoLatch(3000);
            File[] McaFiles = new File(world, "region").listFiles((dir, name) -> name.endsWith(".mca"));
            Supplier<Long> eta = () -> (long) ((McaFiles.length - pr.get()) / rps.getAverage()) * 1000;
            ScheduledFuture<?> sc = Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> {
                int sp = pr.get() - pl.get();
                pl.set(pr.get());
                rps.put(sp);
                if (cl.flip()) {
                    double pc = ((double) pr.get() / (double) McaFiles.length) * 100;
                    Iris.info("Indexing: " + Form.f(pr.get()) + " of " + Form.f(McaFiles.length) + " (%.0f%%) " + Form.f((int) rps.getAverage()) + "/s ETA: " + Form.duration(eta.get(), 2), pc);
                }
            }, 3, 1, TimeUnit.SECONDS);
            BurstExecutor b = MultiBurst.burst.burst(McaFiles.length);
            for (File mca : McaFiles) {
                b.queue(() -> {
                    try {
                        MCAFile region = MCAUtil.read(mca, 0);
                        var array = region.getChunks();
                        for (int i = 0; i < array.length(); i++) {
                            if (array.get(i) != null) {
                                chunks.incrementAndGet();
                            }
                        }
                        pr.incrementAndGet();
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                });
            }
            b.complete();
            sc.cancel(true);
            totalChunks.set(chunks.get());
            Iris.info("Indexing completed!");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
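computeETA() in the new class switches between two estimators: once enough chunks are counted it extrapolates from the observed average time per processed chunk, otherwise it uses the rolling chunks-per-second rate, which responds faster right after start-up (note the guard compares totalChunks.get() against a fixed 1024, while the inline comment describes an eighth of the total). The same arithmetic pulled into a standalone sketch; the class and method names here are illustrative, not fields of IrisWorldAnalytics.

    final class EtaSketch {
        // Smooth estimate: remaining chunks times the observed average cost per chunk, in ms.
        static long smoothEtaMs(int total, int processed, long elapsedMs) {
            return (long) ((total - processed) * ((double) elapsedMs / processed));
        }

        // Quick estimate: remaining chunks divided by recent throughput (chunks/s), converted to ms.
        static long quickEtaMs(int total, int processed, double chunksPerSecond) {
            return (long) ((total - processed) / chunksPerSecond * 1000);
        }
    }

For example, 8,000 of 10,000 chunks processed in 40 seconds gives a smooth estimate of 2,000 * 5 ms = 10,000 ms; the quick estimate at a recent rate of 200 chunks/s is 2,000 / 200 * 1000 = 10,000 ms as well.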
@@ -157,9 +157,11 @@ public class IrisTerrainNormalActuator extends EngineAssignedActuator<BlockData>
                    ore = ore == null ? region.generateOres(realX, i, realZ, rng, getData()) : ore;
                    ore = ore == null ? getDimension().generateOres(realX, i, realZ, rng, getData()) : ore;

                    if (!h.get(xf, i, zf).getMaterial().isAir()) {
                    if (ore != null) {
                        h.set(xf, i, zf, ore);
                        if (ore != null) {
                            h.set(xf, i, zf, ore);
                        } else {
                            if (getDimension().isDeepslateLayer() && i < 64) {
                                h.set(xf, i, zf, DEEPSLATE);
                            } else {
                                h.set(xf, i, zf, context.getRock().get(xf, zf));
                            }
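The duplicated if (ore != null) lines above appear to be the before and after of the same block: the new version wraps ore and rock placement in a non-air check, so generated ore only replaces solid terrain. Read that way, the resulting decision order looks like the fragment below (a sketch of that reading built from the hunk's own identifiers, not the actuator's verbatim code):

    if (!h.get(xf, i, zf).getMaterial().isAir()) {
        if (ore != null) {
            h.set(xf, i, zf, ore);                           // an ore was generated for this position
        } else if (getDimension().isDeepslateLayer() && i < 64) {
            h.set(xf, i, zf, DEEPSLATE);                     // deepslate band below y=64 when enabled
        } else {
            h.set(xf, i, zf, context.getRock().get(xf, zf)); // otherwise the rock from the context stream
        }
    }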
@@ -41,7 +41,6 @@ public class Chunk {
    private int lastMCAUpdate;
    private CompoundTag data;
    private int dataVersion;
    private int nativeIrisVersion;
    private long lastUpdate;
    private long inhabitedTime;
    private MCABiomeContainer biomes;
@@ -150,8 +149,8 @@ public class Chunk {
        if ((loadFlags & STRUCTURES) != 0) {
            structures = level.getCompoundTag("Structures");
        }
        if ((loadFlags & (BLOCK_LIGHTS | BLOCK_STATES | SKY_LIGHT)) != 0 && level.containsKey("Sections")) {
            for (CompoundTag section : level.getListTag("Sections").asCompoundTagList()) {
        if ((loadFlags & (BLOCK_LIGHTS | BLOCK_STATES | SKY_LIGHT)) != 0 && level.containsKey("sections")) {
            for (CompoundTag section : level.getListTag("sections").asCompoundTagList()) {
                int sectionIndex = section.getByte("Y");
                if (sectionIndex > 15 || sectionIndex < 0) {
                    continue;
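The second Chunk hunk switches the NBT key from "Sections" to "sections", which matches the lowercased key used by newer Minecraft chunk formats. If older regions still need to load through the same path, a tolerant reader could try both spellings; a minimal sketch of that fallback, hypothetical and built only from the calls visible in the hunk:

    // Hypothetical fallback: prefer the modern lowercase key, accept the legacy one.
    String sectionsKey = level.containsKey("sections") ? "sections" : "Sections";
    if ((loadFlags & (BLOCK_LIGHTS | BLOCK_STATES | SKY_LIGHT)) != 0 && level.containsKey(sectionsKey)) {
        for (CompoundTag section : level.getListTag(sectionsKey).asCompoundTagList()) {
            // per-section handling continues as in the original loop
        }
    }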