Added MongoDB data handler
@@ -84,6 +84,7 @@ allprojects {
     dependencies {
         // Kotlin
         implementation(kotlin("stdlib", version = "1.6.21"))
+        implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.6.1")
 
         // Included in spigot jar, no need to move to implementation
         compileOnly("org.jetbrains:annotations:23.0.0")
@@ -14,6 +14,8 @@ dependencies {
     implementation 'com.zaxxer:HikariCP:5.0.0'
     implementation 'net.kyori:adventure-platform-bukkit:4.1.0'
    implementation 'org.javassist:javassist:3.28.0-GA'
+    implementation 'org.mongodb:mongo-java-driver:2.12.3'
+    implementation 'org.litote.kmongo:kmongo-coroutine:4.6.0'
 
     // Included in spigot jar
     compileOnly 'com.google.code.gson:gson:2.8.8'
@@ -29,6 +29,7 @@ import com.willfp.eco.internal.scheduling.EcoScheduler
 import com.willfp.eco.internal.spigot.data.DataYml
 import com.willfp.eco.internal.spigot.data.EcoKeyRegistry
 import com.willfp.eco.internal.spigot.data.EcoProfileHandler
+import com.willfp.eco.internal.spigot.data.storage.HandlerType
 import com.willfp.eco.internal.spigot.integrations.bstats.MetricHandler
 import com.willfp.eco.internal.spigot.proxy.CommonsInitializerProxy
 import com.willfp.eco.internal.spigot.proxy.DummyEntityFactoryProxy
@@ -57,7 +58,7 @@ class EcoHandler : EcoSpigotPlugin(), Handler {
 
     private var adventure: BukkitAudiences? = null
     private val keyRegistry = EcoKeyRegistry()
-    private val playerProfileHandler = EcoProfileHandler(this.configYml.getBool("mysql.enabled"), this)
+    private val playerProfileHandler = EcoProfileHandler(HandlerType.valueOf(this.configYml.getString("data-handler").uppercase()), this)
 
     @Suppress("RedundantNullableReturnType")
     private val keyFactory: InternalNamespacedKeyFactory? =
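A note on the EcoHandler change above: HandlerType.valueOf(...) throws IllegalArgumentException when the configured string does not exactly match an enum constant, so a typo in the data-handler option would abort construction of the profile handler. Below is a minimal standalone sketch of a more lenient lookup, with a local copy of the enum and an assumed YAML fallback (hypothetical helper, not part of this commit):

enum class HandlerType { YAML, MYSQL, MONGO }

// Case-insensitive lookup; falls back to YAML instead of throwing on unknown input.
fun handlerTypeOf(configValue: String): HandlerType =
    HandlerType.values().firstOrNull { it.name.equals(configValue, ignoreCase = true) }
        ?: HandlerType.YAML // fallback choice is an assumption of this sketch

fun main() {
    println(handlerTypeOf("mongo"))  // MONGO
    println(handlerTypeOf("oracle")) // YAML
}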
@@ -7,6 +7,8 @@ import com.willfp.eco.core.data.ServerProfile
 import com.willfp.eco.core.data.keys.PersistentDataKey
 import com.willfp.eco.internal.spigot.EcoSpigotPlugin
 import com.willfp.eco.internal.spigot.data.storage.DataHandler
+import com.willfp.eco.internal.spigot.data.storage.HandlerType
+import com.willfp.eco.internal.spigot.data.storage.MongoDataHandler
 import com.willfp.eco.internal.spigot.data.storage.MySQLDataHandler
 import com.willfp.eco.internal.spigot.data.storage.YamlDataHandler
 import java.util.UUID
@@ -14,12 +16,16 @@ import java.util.UUID
 val serverProfileUUID = UUID(0, 0)
 
 class EcoProfileHandler(
-    useSql: Boolean,
+    type: HandlerType,
     plugin: EcoSpigotPlugin
 ) : ProfileHandler {
     private val loaded = mutableMapOf<UUID, Profile>()
-    val handler: DataHandler = if (useSql) MySQLDataHandler(plugin, this) else
-        YamlDataHandler(plugin, this)
+
+    val handler: DataHandler = when(type) {
+        HandlerType.YAML -> YamlDataHandler(plugin, this)
+        HandlerType.MYSQL -> MySQLDataHandler(plugin, this)
+        HandlerType.MONGO -> MongoDataHandler(plugin)
+    }
 
     fun loadGenericProfile(uuid: UUID): Profile {
         val found = loaded[uuid]
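Because handler is assigned from an expression form of when over HandlerType, the compiler requires every constant to be covered: adding a fourth storage backend to the enum without wiring it here is a compile error rather than a silent runtime fallthrough. A tiny standalone sketch of the same pattern (illustrative names, not the plugin's):

enum class Backend { YAML, MYSQL, MONGO }

// An expression `when` over an enum must be exhaustive when its value is used.
fun describe(backend: Backend): String = when (backend) {
    Backend.YAML -> "flat data.yml storage"
    Backend.MYSQL -> "relational storage via HikariCP"
    Backend.MONGO -> "document storage via KMongo"
    // omitting a constant here would fail to compile
}

fun main() = println(describe(Backend.MONGO))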
@@ -6,7 +6,10 @@ import org.bukkit.NamespacedKey
 import java.util.UUID
 
 interface DataHandler {
-    fun save()
+    fun save() {
+
+    }
+
     fun saveAll(uuids: Iterable<UUID>)
 
     fun categorize(key: PersistentDataKey<*>, category: KeyRegistry.KeyCategory) {
@@ -0,0 +1,7 @@
+package com.willfp.eco.internal.spigot.data.storage
+
+enum class HandlerType {
+    YAML,
+    MYSQL,
+    MONGO
+}
@@ -0,0 +1,101 @@
+package com.willfp.eco.internal.spigot.data.storage
+
+import com.willfp.eco.core.EcoPlugin
+import com.willfp.eco.core.data.keys.PersistentDataKey
+import kotlinx.coroutines.launch
+import kotlinx.coroutines.runBlocking
+import org.bson.codecs.pojo.annotations.BsonId
+import org.bukkit.NamespacedKey
+import org.litote.kmongo.coroutine.CoroutineClient
+import org.litote.kmongo.coroutine.CoroutineCollection
+import org.litote.kmongo.coroutine.coroutine
+import org.litote.kmongo.eq
+import org.litote.kmongo.reactivestreams.KMongo
+import org.litote.kmongo.setValue
+import java.util.UUID
+
+@Suppress("UNCHECKED_CAST")
+class MongoDataHandler(
+    plugin: EcoPlugin
+) : DataHandler {
+    private val client: CoroutineClient
+    private val collection: CoroutineCollection<SerializableProfile>
+
+    init {
+        val url = plugin.configYml.getString("mongodb.url")
+
+        client = KMongo.createClient(url).coroutine
+        collection = client.getDatabase("eco").getCollection()
+    }
+
+    override fun saveAll(uuids: Iterable<UUID>) {
+        for (uuid in uuids) {
+            saveKeysFor(uuid, PersistentDataKey.values())
+        }
+    }
+
+    override fun <T> write(uuid: UUID, key: NamespacedKey, value: T) {
+        runBlocking {
+            launch {
+                doWrite(uuid, key, value)
+            }
+        }
+    }
+
+    private suspend fun <T> doWrite(uuid: UUID, key: NamespacedKey, value: T) {
+        val profile = getOrCreateDocument(uuid)
+
+        val newData = profile.data.apply {
+            if (value == null) {
+                this.remove(key)
+            } else {
+                this[key] = value
+            }
+        }
+
+        collection.updateOne(SerializableProfile::uuid eq uuid, setValue(SerializableProfile::data, newData))
+    }
+
+    override fun saveKeysFor(uuid: UUID, keys: Set<PersistentDataKey<*>>) {
+        runBlocking {
+            launch {
+                for (key in keys) {
+                    doWrite(uuid, key.key, read(uuid, key))
+                }
+            }
+        }
+    }
+
+    override fun <T> read(uuid: UUID, key: PersistentDataKey<T>): T? {
+        return runBlocking {
+            doRead(uuid, key)
+        }
+    }
+
+    private suspend fun <T> doRead(uuid: UUID, key: PersistentDataKey<T>): T? {
+        val profile = collection.findOne(SerializableProfile::uuid eq uuid) ?: return key.defaultValue
+        return profile.data[key.key] as? T?
+    }
+
+    private suspend fun getOrCreateDocument(uuid: UUID): SerializableProfile {
+        val profile = collection.findOne(SerializableProfile::uuid eq uuid)
+        return if (profile == null) {
+            collection.insertOne(
+                SerializableProfile(
+                    uuid,
+                    mutableMapOf()
+                )
+            )
+
+            getOrCreateDocument(uuid)
+        } else {
+            profile
+        }
+    }
+}
+
+private data class SerializableProfile(
+    @BsonId
+    val uuid: UUID,
+    val data: MutableMap<NamespacedKey, Any>
+)
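For context on the KMongo coroutine calls used in the new handler, here is a minimal standalone sketch, assuming a reachable MongoDB at mongodb://localhost:27017 and the kmongo-coroutine dependency added above; the ExampleProfile type, database name, and keys are illustrative, not the plugin's:

import kotlinx.coroutines.runBlocking
import org.litote.kmongo.coroutine.coroutine
import org.litote.kmongo.eq
import org.litote.kmongo.reactivestreams.KMongo
import org.litote.kmongo.setValue
import java.util.UUID

data class ExampleProfile(val uuid: UUID, val data: MutableMap<String, Int>)

fun main(): Unit = runBlocking {
    // Connect and bind a typed collection, mirroring MongoDataHandler's init block.
    val client = KMongo.createClient("mongodb://localhost:27017").coroutine
    val collection = client.getDatabase("eco-example").getCollection<ExampleProfile>()

    val id = UUID.randomUUID()
    // Insert, then replace the data field with setValue(), as doWrite() does above.
    collection.insertOne(ExampleProfile(id, mutableMapOf("example:balance" to 100)))
    collection.updateOne(
        ExampleProfile::uuid eq id,
        setValue(ExampleProfile::data, mutableMapOf("example:balance" to 250))
    )
    // Read back with findOne(), as doRead() does above.
    println(collection.findOne(ExampleProfile::uuid eq id))

    client.close()
}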
@@ -45,7 +45,6 @@ class MySQLDataHandler(
     private val serverHandler: ImplementedMySQLHandler
 
     init {
-
         val config = HikariConfig()
         config.driverClassName = "com.mysql.cj.jdbc.Driver"
         config.username = plugin.configYml.getString("mysql.user")
@@ -3,8 +3,13 @@
 # by Auxilor
 #
 
+handler-type: yaml # Pick from yaml/mongo/mysql - MongoDB is recommended over MySQL for networks.
+
+mongodb:
+  # The full MongoDB connection URL.
+  url: ""
+
 mysql:
-  enabled: false # Set to false, data.yml will be used instead.
   # How many threads to execute statements on. Higher numbers can be faster however
   # very high numbers can cause issues with OS configuration. If writes are taking
   # too long, increase this value.