Commit
Merge branch 'refs/heads/v3' into v3-CASL-443-delete-feature-by-id
# Conflicts:
#	here-naksha-lib-psql/src/commonMain/kotlin/naksha/psql/executors/PgReader.kt
gunplar committed Sep 13, 2024
2 parents e15c652 + f0dd8e6 commit c7072d0
Showing 23 changed files with 598 additions and 565 deletions.
15 changes: 15 additions & 0 deletions browser_test.html
@@ -43,6 +43,21 @@
const { Platform, PlatformUtil } = require("naksha_base");
const { PgUtil } = require("naksha_psql");
const { JbEncoder, JbDecoder, JbFeatureDecoder } = require("naksha_jbon");

// Encodes a JSON feature string into a JBON byte-array.
function encodeFeature(json) {
  if (!json) json = '{"id":"foo","properties":{"name":"Bar"}}'
  let raw = Platform.fromJSON(json)              // parse JSON into a raw platform object
  let klass = Platform.klassFor(base.AnyObject)  // resolve the proxy class for AnyObject
  let map = Platform.proxy(raw, klass)           // wrap the raw object as a proxied map
  let enc = new JbEncoder();
  let f = enc.buildFeatureFromMap(map)           // build the JBON feature bytes
  return f
}
// Decodes a JBON byte-array back into a plain object.
function decodeFeature(f) {
  let dec = new JbFeatureDecoder()
  dec.mapBytes(f)
  return dec.toAnyObject()
}
</script>
</head>
<body>
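For comparison, a rough Kotlin sketch of the same JBON round-trip; the class and method names (Platform.fromJSON/klassFor/proxy, JbEncoder.buildFeatureFromMap, JbFeatureDecoder.mapBytes/toAnyObject) are taken from the JavaScript above, while the package paths and exact Kotlin signatures are assumptions.

```kotlin
// Sketch only: package paths and signatures are assumed, the call sequence mirrors
// the JavaScript helpers in browser_test.html above.
import naksha.base.AnyObject
import naksha.base.Platform
import naksha.jbon.JbEncoder
import naksha.jbon.JbFeatureDecoder

fun encodeFeature(json: String = """{"id":"foo","properties":{"name":"Bar"}}"""): ByteArray {
    val raw = Platform.fromJSON(json)           // parse JSON into a raw platform object
    val klass = Platform.klassFor(::AnyObject)  // resolve the proxy class
    val map = Platform.proxy(raw, klass)        // wrap as AnyObject proxy (return type assumed)
    return JbEncoder().buildFeatureFromMap(map) // encode into a JBON byte-array
}

fun decodeFeature(bytes: ByteArray): AnyObject {
    val decoder = JbFeatureDecoder()
    decoder.mapBytes(bytes)       // map the decoder over the JBON bytes
    return decoder.toAnyObject()  // materialize back into an AnyObject
}
```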
4 changes: 2 additions & 2 deletions build.gradle.kts
@@ -28,8 +28,8 @@ plugins {
// https://maven.pkg.jetbrains.space/kotlin/p/kotlin/bootstrap/org/jetbrains/kotlin/kotlin-compiler/maven-metadata.xml
//id("org.jetbrains.kotlin.multiplatform").version("2.1.0-dev-1329").apply(false)
//kotlin("plugin.js-plain-objects").version("2.1.0-dev-1329")
id("org.jetbrains.kotlin.multiplatform").version("2.0.20-Beta2").apply(false)
kotlin("plugin.js-plain-objects").version("2.0.20-Beta2")
id("org.jetbrains.kotlin.multiplatform").version("2.0.20").apply(false)
kotlin("plugin.js-plain-objects").version("2.0.20")
}

group = "com.here.naksha"
@@ -344,11 +344,10 @@ actual class Platform {
@JsStatic
actual fun isProxyKlass(klass: KClass<*>): Boolean = isAssignable(klass, Proxy::class)

// TODO: Find the constructor in namespace of module.
@Suppress("NON_EXPORTABLE_TYPE")
@JsStatic
actual fun <T : Any> klassFor(constructor: KFunction<T>): KClass<out T> =
js("""require('module_name').package.full.path.ClassName""").unsafeCast<KClass<T>>()
actual fun <T : Any> klassFor(constructor: KFunction<T>): KClass<out T>
= (js("Object.create(constructor.prototype)") as T)::class

@Suppress("UNCHECKED_CAST", "NON_EXPORTABLE_TYPE")
@JsStatic
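The new `klassFor` body obtains the class by instantiating the constructor's prototype on the JS target and taking its `KClass`. A minimal, hedged usage sketch follows; static access as in the browser test above is assumed.

```kotlin
// Sketch only: whether these helpers are reachable exactly like this is assumed.
import naksha.base.AnyObject
import naksha.base.Platform

fun demoKlassFor() {
    val klass = Platform.klassFor(::AnyObject)  // KClass<out AnyObject> derived from the constructor
    println(Platform.isProxyKlass(klass))       // true, if AnyObject is a proxy type
}
```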
@@ -21,7 +21,7 @@ enum class FetchMode(val raw: String) {
FETCH_META("meta"),

/**
* Fetch all data except for the [feature][Tuple.feature].
* Fetch all data except for the [feature][Tuple.feature], and [attachment][Tuple.attachment].
*/
FETCH_ALL_BUT_FEATURE("all-but-feature"),

@@ -89,11 +89,12 @@ interface ISession : AutoCloseable {
* Load the latest [tuples][Tuple] of the features with the given identifiers, from the given collection/map.
*
* The fetch modes are:
* - [all][FETCH_ALL] (_**default**_) - all columns
* - [all-no-cache][FETCH_ALL] - all columns, but do not access cache (but cache is updated)
* - [id][FETCH_ID] - id and row-id, rest from cache, if available
* - [meta][FETCH_META] - metadata and row-id, rest from cache, if available
* - [cached-only][FETCH_CACHE] - only what is available in cache
* - [all][FetchMode.FETCH_ALL] (_**default**_) - all columns
* - [all-no-cache][FetchMode.FETCH_ALL_NO_CACHE] - all columns, but do not access cache (but cache is updated)
* - [id][FetchMode.FETCH_ID] - id and row-id, rest from cache, if available
* - [meta][FetchMode.FETCH_META] - metadata and row-id, rest from cache, if available
* - [all-but-feature][FetchMode.FETCH_ALL_BUT_FEATURE] - all, except for the payload
* - [cached-only][FetchMode.FETCH_CACHE] - only what is available in cache
*
* @param mapId the map from which to load.
* @param collectionId the collection from to load.
@@ -108,11 +109,12 @@
* Load specific [tuples][naksha.model.Tuple].
*
* The fetch modes are:
* - [all][FETCH_ALL] (_**default**_) - all columns
* - [all-no-cache][FETCH_ALL] - all columns, but do not access cache (but cache is updated)
* - [id][FETCH_ID] - id and row-id, rest from cache, if available
* - [meta][FETCH_META] - metadata and row-id, rest from cache, if available
* - [cached-only][FETCH_CACHE] - only what is available in cache
* - [all][FetchMode.FETCH_ALL] (_**default**_) - all columns
* - [all-no-cache][FetchMode.FETCH_ALL_NO_CACHE] - all columns, but do not access cache (but cache is updated)
* - [id][FetchMode.FETCH_ID] - id and row-id, rest from cache, if available
* - [meta][FetchMode.FETCH_META] - metadata and row-id, rest from cache, if available
* - [all-but-feature][FetchMode.FETCH_ALL_BUT_FEATURE] - all, except for the payload
* - [cached-only][FetchMode.FETCH_CACHE] - only what is available in cache
*
* @param tupleNumbers a list of [tuple-numbers][TupleNumber] of the rows to load.
* @param mode the fetch mode.
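A hedged sketch of how the fetch modes listed above might be used; the `fetchTuples` parameters follow the documented `tupleNumbers`/`mode` names, while the return type and package paths are assumptions.

```kotlin
// Sketch only: signature details of ISession.fetchTuples are assumed from the KDoc above.
import naksha.model.FetchMode
import naksha.model.ISession
import naksha.model.TupleNumber

fun warmUpMetadata(session: ISession, tupleNumbers: List<TupleNumber>) {
    // Load only metadata and row-id; the remaining columns come from the cache, if available.
    session.fetchTuples(tupleNumbers, FetchMode.FETCH_META)
}

fun forceFreshLoad(session: ISession, tupleNumbers: List<TupleNumber>) {
    // Bypass the cache on read (the cache is still updated with the loaded rows).
    session.fetchTuples(tupleNumbers, FetchMode.FETCH_ALL_NO_CACHE)
}
```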
@@ -35,6 +35,13 @@ interface IStorage : AutoCloseable {
*/
val adminOptions: SessionOptions

/**
* The hard-cap (limit) of the storage. No result-set should ever become bigger than this number of features.
*
* Setting the value is optionally supported; storages may throw a [NakshaError.UNSUPPORTED_OPERATION] exception when trying to modify the hard-cap, or they may only allow certain values and throw a [NakshaError.ILLEGAL_ARGUMENT] exception if the value is too big. A negative value is changed into [Int.MAX_VALUE], which means no hard-cap (if supported by the storage).
*/
var hardCap: Int

/**
* Tests if this storage is initialized, so [initStorage] has been called.
* @return _true_ if this storage is initialized; _false_ otherwise.
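A hedged sketch of adjusting the hard-cap defensively; how the UNSUPPORTED_OPERATION / ILLEGAL_ARGUMENT errors surface as exceptions is an assumption.

```kotlin
// Sketch only: the concrete exception type carrying NakshaError.UNSUPPORTED_OPERATION or
// NakshaError.ILLEGAL_ARGUMENT is storage-specific and assumed here.
import naksha.model.IStorage

fun tryRaiseHardCap(storage: IStorage, wanted: Int) {
    try {
        storage.hardCap = wanted   // e.g. 5_000_000
    } catch (e: Exception) {
        // The storage either forbids changing the hard-cap or rejects the requested value.
        println("hard-cap not adjusted, keeping ${storage.hardCap}: ${e.message}")
    }
    // A negative value maps to Int.MAX_VALUE, i.e. no hard-cap (if the storage supports that):
    // storage.hardCap = -1
}
```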
@@ -14,6 +14,8 @@ import kotlin.js.JsStatic
import kotlin.jvm.JvmField
import kotlin.jvm.JvmStatic

// TODO: Maybe add a middle-ground: gzip(string_agg($tuple_number||$id::bytea,'\x00'::bytea))

/**
* A helper that wraps a byte-array that contains a metadata byte-array.
* ```sql
@@ -14,6 +14,8 @@ import kotlin.js.JsStatic
import kotlin.jvm.JvmField
import kotlin.jvm.JvmStatic

// TODO: Maybe add a middle-ground: gzip(string_agg($tuple_number||$id::bytea,'\x00'::bytea))

/**
* A helper that wraps a byte-array that contains one to n row-ids.
* ```sql
@@ -7,6 +7,7 @@ import naksha.base.NullableProperty
import naksha.base.AnyObject
import naksha.model.request.query.TupleColumn
import naksha.model.request.query.SortOrder
import naksha.model.request.query.SortOrder.SortOrderCompanion.ANY
import naksha.model.request.query.SortOrder.SortOrderCompanion.DESCENDING
import kotlin.js.JsExport
import kotlin.js.JsName
@@ -89,6 +90,12 @@ open class OrderBy() : AnyObject() {
*/
var next by ORDER_BY_NULL

/**
* Tests if this represents deterministic ordering, which means that no specific column is selected (`null`), the order is [ANY], and no other conditions are given ([next] = `null`).
* @return _true_ if this represents the deterministic order; _false_ otherwise.
*/
fun isDeterministic(): Boolean = column == null && order == ANY && next == null

override fun equals(other: Any?): Boolean {
if (other !is OrderBy) return false
return column == other.column
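A hedged sketch of what `isDeterministic()` distinguishes; it assumes a default-constructed `OrderBy()` starts with no column, order `ANY` and no chained `next`, and the package path is assumed as well.

```kotlin
// Sketch only: the default state of OrderBy() and the package path are assumed.
import naksha.model.request.OrderBy

fun demoOrderBy() {
    val orderBy = OrderBy()
    // No column selected, order ANY, no chained ordering: the storage may return rows
    // in its natural (deterministic) order, e.g. by tuple-number.
    println(orderBy.isDeterministic())  // expected: true
}
```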
@@ -69,6 +69,8 @@ open class ReadFeatures() : ReadRequest() {
*
* This defaults to 1, which means only the latest version, being closest to the given maximal [version], should be returned; if no [version] is given, the latest version is meant.
*
* This parameter is ignored for queries by _GUID_, because a _GUID_ already identifies an exact version. If this value is not _1_ (the default), the query requires that [queryHistory] is _true_; if [queryHistory] is _false_, the request will be rejected with [naksha.model.NakshaError.ILLEGAL_ARGUMENT].
*
* If multiple versions are requested, the execution may become drastically slower, therefore this feature should be used with care!
* @since 3.0.0
*/
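A hedged sketch of the rule described above; `versions` is an assumed name for the property being documented (its declaration is outside the shown hunk), while `queryHistory` is the name referenced in the KDoc.

```kotlin
// Sketch only: `versions` is a hypothetical property name, `queryHistory` comes from the KDoc;
// the package path is assumed.
import naksha.model.request.ReadFeatures

fun readMultipleVersions(): ReadFeatures {
    val request = ReadFeatures()
    request.versions = 3         // hypothetical name: ask for up to three versions per feature
    request.queryHistory = true  // required, otherwise the request is rejected with ILLEGAL_ARGUMENT
    return request
}
```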
@@ -26,7 +26,7 @@ open class ReadRequest : Request() {
/**
* A soft-cap, i.e. the number of rows the client needs.
*
* If _null_, the storage will automatically decide for some soft-cap value. If all results are needed, setting it to [Int.MAX_VALUE] should be considered. If the soft-cap (_limit_) is bigger than what the storage supports as hard-cap, the hard-cap is used by the storage. For example `lib-psql` has a default hard-cap of 1,000,000, and therefore will never fetch more than one million rows, even when requested.
* If _null_, the storage will automatically decide for some hard-cap value. If all results are needed, setting it to [Int.MAX_VALUE] should be considered. If the soft-cap (_limit_) is bigger than what the storage supports as hard-cap, the hard-cap is used by the storage. For example `lib-psql` has a default hard-cap of 1,000,000, and therefore will never fetch more than one million rows, even when requested.
*
* To query more than the hard-cap of a storage, a streaming processing is needed. The interface for this is not yet designed, but may come with later model specifications.
* @since 3.0.0
@@ -36,13 +36,13 @@
/**
* A parameter to tell the storage if the client wants a handle.
*
* If _true_, the storage need to always generate the full result-set. It does not need to load all features into memory all the time, but as soon as a handle should be generated, an ordered result-set is needed, which requires to fetch all results to order them. Therefore, the storage at least need to generate the list of all [row identifiers][naksha.model.TupleNumber] being part of the result, then ordering them, optimally only by `version` and `uid`, which does not require to load all the row data. This is needed to be able to generate a handle from it (so to seek within the result-set).
* If _true_, the storage needs to always generate the full result-set. It does not need to load all features into memory all the time, but as soon as a handle should be generated, an ordered result-set is needed, which requires fetching all results to order them. Therefore, the storage at least needs to generate the list of all [tuple-numbers][naksha.model.TupleNumber] being part of the result, then order them, optimally only by `version` and `uid`, which does not require loading all the row data. This is needed to be able to generate a handle from it (so as to seek within the result-set).
*
* If the storage need to apply any filter-lambdas or perform a _property_ search (which is as well an intrinsic filtering lambda), it at least need to load as many results as the [limit] describes from the storage into memory.
*
* A middle ground is to order by data that is part of the [metadata][naksha.model.Metadata]. This requires the storage to load all rows with their metadata into memory, but it does not yet need to load the feature itself, nor the geometry, tags or attachment.
*
* The worst case is an order by something very custom, when requested, the storage needs not only the [row identifiers][naksha.model.TupleNumber], but the full rows with all data. In that case all results are loaded into memory, filtered, and eventually ordered.
* The worst case is an order by something very custom; when requested, the storage needs not only the [tuple-numbers][naksha.model.TupleNumber], but the full rows with all data. In that case all results are loaded into memory, filtered, and eventually ordered.
* @since 3.0.0
*/
var returnHandle by BOOLEAN
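A hedged sketch combining the soft-cap and the handle flag discussed above; `limit` and `returnHandle` are the names used in this hunk, while using `ReadFeatures` as the concrete request type and the package path are assumptions.

```kotlin
// Sketch only: package path and the exact type of `limit` are assumed.
import naksha.model.request.ReadFeatures

fun pagedRequest(): ReadFeatures {
    val request = ReadFeatures()
    request.limit = 1_000        // soft-cap: the client needs at most 1,000 rows per page
    request.returnHandle = true  // forces an ordered result-set so that a seek handle can be produced
    return request
}
```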
@@ -25,7 +25,7 @@ open class Request : AnyObject() {
protected open fun defaultRowOptions() : ReturnColumns = ReturnColumns.all()

/**
* Options of what data is needed by the [resultFilters].
* Options of what data is needed by the [resultFilters] and the client (defaults for [SuccessResponse.features]).
*
* The columns of a [tuple][naksha.model.Tuple] that are needed in the [resultFilters]. Actually, if any [resultFilters] are set, this causes the API to invoke [naksha.model.ISession.fetchTuples] before delivering the [ResultTuple] to the [resultFilters].
*/
@@ -34,7 +34,7 @@ open class ResultTuple(
/**
* The feature-id.
*
* Can be _null_, when not yet fetched from the storage, use [IStorage.fetchTuples].
* Can be _null_ when not yet fetched from the storage; use [ISession.fetchTuples].
*
* When ordering by feature-id, the storage should load the feature identifiers together with the row identifiers. This operation will be slower than loading only the row identifiers, but still fast enough. However, at many places it is needed, like to create seekable views.
*/
@@ -43,7 +43,7 @@
/**
* If the row is already in the cache, the reference to the row.
*
* Can be _null_, when not yet fetched from the storage, use [IStorage.fetchTuples] or when [op] is [PURGED][ExecutedOp.PURGED] or [RETAINED][ExecutedOp.RETAINED].
* Can be _null_ when not yet fetched from the storage (use [ISession.fetchTuples]), or when [op] is [PURGED][ExecutedOp.PURGED] or [RETAINED][ExecutedOp.RETAINED].
*/
@JvmField var tuple: Tuple?
) {
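A hedged sketch of the lazy-loading pattern implied by the nullable fields above; the name of the tuple-number field on `ResultTuple` and the `fetchTuples` signature are assumptions.

```kotlin
// Sketch only: `tupleNumber` is an assumed field name on ResultTuple; whether fetchTuples
// back-fills ResultTuple.tuple or only the cache is storage-specific.
import naksha.model.FetchMode
import naksha.model.ISession
import naksha.model.request.ResultTuple

fun ensureTuplesLoaded(session: ISession, results: List<ResultTuple>) {
    // Note: for PURGED or RETAINED results the tuple may legitimately stay null.
    val missing = results.filter { it.tuple == null }.map { it.tupleNumber }
    if (missing.isNotEmpty()) {
        session.fetchTuples(missing, FetchMode.FETCH_ALL)  // load the missing rows (and update the cache)
    }
}
```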
@@ -29,7 +29,7 @@ import kotlin.reflect.KClass
* WITH geo AS (
* SELECT id
* FROM table
* WHERE st_intersects(naksha_geometry(flags,geo), ?)
* WHERE st_intersects(naksha_geometry(geo,flags), ?)
* )
* WITH tags AS (
* SELECT id
@@ -192,7 +192,7 @@ ${if (addFillFactor) "WITH (fillfactor="+if (table.isVolatile) "65)" else "100)"}
self.createFn = Fx2 { conn, table ->
conn.execute(
self.sql(
"""gist (naksha_geometry($c_flags,$c_geo), $c_id, $c_txn, $c_uid)""",
"""gist (naksha_geometry($c_geo, $c_flags), $c_id, $c_txn, $c_uid)""",
table, unique = false, addFillFactor = true
)
).close()
@@ -209,7 +209,7 @@ ${if (addFillFactor) "WITH (fillfactor="+if (table.isVolatile) "65)" else "100)"}
self.createFn = Fx2 { conn, table ->
conn.execute(
self.sql(
"""sp-gist (naksha_geometry($c_flags,$c_geo), $c_id, $c_txn, $c_uid)""",
"""sp-gist (naksha_geometry($c_geo, $c_flags), $c_id, $c_txn, $c_uid)""",
table, unique = false, addFillFactor = true
)
).close()