node_modules ignore

2025-05-08 23:43:47 +02:00
parent e19d52f172
commit 4574544c9f
65041 changed files with 10593536 additions and 0 deletions

dist/query/helpers/streams/index.d.ts

@@ -0,0 +1,2 @@
export { default as ReadableQuery } from './readable';
//# sourceMappingURL=index.d.ts.map

dist/query/helpers/streams/index.d.ts.map

@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../../src/query/helpers/streams/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,IAAI,aAAa,EAAE,MAAM,YAAY,CAAC"}

dist/query/helpers/streams/readable.d.ts

@@ -0,0 +1,39 @@
/// <reference types="node" />
import { Readable } from 'stream';
import type { Knex } from 'knex';
import type { QueryBuilder } from '../../query-builder';
import type { Database } from '../../..';
import { Meta } from '../../../metadata';
declare const knexPerformingQuery: unique symbol;
interface ReadableStrapiQueryOptions {
qb: QueryBuilder;
uid: string;
db: Database;
mapResults?: boolean;
batchSize?: number;
}
declare class ReadableStrapiQuery extends Readable {
_offset: number;
_limit: number | null;
_fetched: number;
_query: Knex.QueryBuilder;
_qb: QueryBuilder;
_db: Database;
_uid: string;
_meta: Meta;
_batchSize: number;
_mapResults: boolean;
[knexPerformingQuery]: boolean;
constructor({ qb, db, uid, mapResults, batchSize }: ReadableStrapiQueryOptions);
_destroy(err: Error, cb: (err?: Error) => void): void;
/**
* Custom ._read() implementation
*
* NOTE: Here "size" means the number of entities to read from the database,
* not the byte size, since a byte size would require returning partial entities.
*
*/
_read(size: number): Promise<void>;
}
export default ReadableStrapiQuery;
//# sourceMappingURL=readable.d.ts.map
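
Since ReadableStrapiQuery extends Node's Readable in object mode, it can be consumed with for await...of, one entity per chunk. A minimal consumer sketch in TypeScript, assuming a Database instance and a QueryBuilder obtained elsewhere in the package; streamEntities and its arguments are illustrative, not part of the package:

import ReadableQuery from './readable';
import type { Database } from '../../..';
import type { QueryBuilder } from '../../query-builder';

// `db` and `qb` are assumed to be created by the surrounding package;
// `uid` is whatever content-type the query targets.
async function streamEntities(db: Database, qb: QueryBuilder, uid: string) {
  // batchSize and mapResults are optional (defaults: 500 and true)
  const stream = new ReadableQuery({ qb, db, uid, batchSize: 100 });

  // A Readable in objectMode is async-iterable: each chunk is one entity
  for await (const entity of stream) {
    console.log(entity);
  }
}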

dist/query/helpers/streams/readable.d.ts.map

@@ -0,0 +1 @@
{"version":3,"file":"readable.d.ts","sourceRoot":"","sources":["../../../../src/query/helpers/streams/readable.ts"],"names":[],"mappings":";AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,QAAQ,CAAC;AAElC,OAAO,KAAK,EAAE,IAAI,EAAE,MAAM,MAAM,CAAC;AACjC,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,qBAAqB,CAAC;AACxD,OAAO,KAAK,EAAE,QAAQ,EAAE,MAAM,UAAU,CAAC;AAIzC,OAAO,EAAE,IAAI,EAAE,MAAM,mBAAmB,CAAC;AAGzC,QAAA,MAAM,mBAAmB,eAAgC,CAAC;AAE1D,UAAU,0BAA0B;IAClC,EAAE,EAAE,YAAY,CAAC;IACjB,GAAG,EAAE,MAAM,CAAC;IACZ,EAAE,EAAE,QAAQ,CAAC;IACb,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED,cAAM,mBAAoB,SAAQ,QAAQ;IACxC,OAAO,EAAE,MAAM,CAAC;IAEhB,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IAEtB,QAAQ,EAAE,MAAM,CAAC;IAEjB,MAAM,EAAE,IAAI,CAAC,YAAY,CAAC;IAE1B,GAAG,EAAE,YAAY,CAAC;IAElB,GAAG,EAAE,QAAQ,CAAC;IAEd,IAAI,EAAE,MAAM,CAAC;IAEb,KAAK,EAAE,IAAI,CAAC;IAEZ,UAAU,EAAE,MAAM,CAAC;IAEnB,WAAW,EAAE,OAAO,CAAC;IAErB,CAAC,mBAAmB,CAAC,EAAE,OAAO,CAAC;gBAEnB,EAAE,EAAE,EAAE,EAAE,EAAE,GAAG,EAAE,UAAiB,EAAE,SAAe,EAAE,EAAE,0BAA0B;IAoC3F,QAAQ,CAAC,GAAG,EAAE,KAAK,EAAE,EAAE,EAAE,CAAC,GAAG,CAAC,EAAE,KAAK,KAAK,IAAI;IAU9C;;;;;;OAMG;IACG,KAAK,CAAC,IAAI,EAAE,MAAM;CAgGzB;AAED,eAAe,mBAAmB,CAAC"}

dist/query/helpers/streams/readable.js

@@ -0,0 +1,131 @@
'use strict';
var stream = require('stream');
var _ = require('lodash/fp');
var apply = require('../populate/apply.js');
var transform = require('../transform.js');
const knexQueryDone = Symbol('knexQueryDone');
const knexPerformingQuery = Symbol('knexPerformingQuery');
class ReadableStrapiQuery extends stream.Readable {
_destroy(err, cb) {
// If the stream is destroyed while a query is being made, wait for the
// knexQueryDone event to be emitted before actually destroying the stream
if (this[knexPerformingQuery]) {
this.once(knexQueryDone, (er)=>cb(err || er));
} else {
cb(err);
}
}
/**
* Custom ._read() implementation
*
* NOTE: Here "size" means the number of entities to read from the database,
* not the byte size, since a byte size would require returning partial entities.
*/
async _read(size) {
const query = this._query;
// Remove the original offset & limit properties from the query
// They are re-applied below with updated values; clearing them first is just a safety measure
query.clear('limit').clear('offset');
// Define the maximum read size based on the limit and the requested size
// NOTE: size is equal to _batchSize by default. Since we want to allow customizing it on
// the fly, we need to use its value instead of batchSize when computing the maxReadSize value
// If no limit is defined in the query, use the given size;
// otherwise, use the smaller of the two
const maxReadSize = this._limit === null ? size : Math.min(size, this._limit);
// Compute the limit for the next query: if a limit is defined and reading
// `maxReadSize` entities would overshoot it (> _limit), only read what remains
const limit = this._limit !== null && this._fetched + maxReadSize > this._limit
? this._limit - this._fetched
: maxReadSize;
// If we don't have anything left to read (_limit === _fetched),
// don't bother making the query and end the stream by pushing null
if (limit <= 0) {
this.push(null);
return;
}
// Compute the offset (base offset + number of entities already fetched)
const offset = this._offset + this._fetched;
// Update the query with the new values (offset + limit)
query.offset(offset).limit(limit);
// Lock the ._destroy()
this[knexPerformingQuery] = true;
let results;
let count;
let err;
try {
// Execute the query and store the results & count
results = await query;
const { populate } = this._qb.state;
// Apply the populate if needed
if (populate) {
await apply(results, populate, {
qb: this._qb,
uid: this._uid,
db: this._db
});
}
// Map results if asked to
if (this._mapResults) {
results = transform.fromRow(this._meta, results);
}
count = results.length;
} catch (e) {
err = e;
}
// Unlock the ._destroy()
this[knexPerformingQuery] = false;
// Tell ._destroy() that it's now safe to close the db connection
if (this.destroyed) {
this.emit(knexQueryDone);
return;
}
// If there is an error, destroy with the given error
if (err) {
this.destroy(err);
return;
}
// Update the amount of fetched entities
this._fetched += count;
// Push each fetched entity into the stream
for (const result of results){
this.push(result);
}
// If we've reached the limit, or the last batch returned fewer
// entities than the batch size, push null to end the stream
if (this._fetched === this._limit || count < this._batchSize) {
this.push(null);
}
}
constructor({ qb, db, uid, mapResults = true, batchSize = 500 }){
super({
objectMode: true,
highWaterMark: batchSize
});
// Extract offset & limit from the query-builder's state
const { offset, limit } = qb.state;
// Original offset value
this._offset = _.isFinite(offset) ? Number(offset) : 0;
// Maximum number of entities to fetch; null when no limit is defined
this._limit = _.isFinite(limit) ? Number(limit) : null;
// Total number of entities fetched so far
this._fetched = 0;
// Original Knex query
this._query = qb.getKnexQuery();
// Query Builder instance
this._qb = qb;
// Database related properties
this._db = db;
this._uid = uid;
this._meta = db.metadata.get(uid);
// Stream params
this._batchSize = batchSize;
this._mapResults = mapResults;
// States
this[knexPerformingQuery] = false;
}
}
module.exports = ReadableStrapiQuery;
//# sourceMappingURL=readable.js.map
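
To make the windowing arithmetic in _read concrete, here is the same offset/limit computation restated as a pure function, followed by the query sequence it produces for a builder with offset 10, limit 1200, and the default batch size of 500. This is an illustrative sketch, not code from the package:

// Pure restatement of the window computation in _read above.
// Returns null when there is nothing left to read (the stream would push null).
function nextWindow(
  baseOffset: number,
  totalLimit: number | null,
  fetched: number,
  size: number
): { offset: number; limit: number } | null {
  const maxReadSize = totalLimit === null ? size : Math.min(size, totalLimit);
  const limit =
    totalLimit !== null && fetched + maxReadSize > totalLimit
      ? totalLimit - fetched
      : maxReadSize;
  if (limit <= 0) return null;
  return { offset: baseOffset + fetched, limit };
}

// offset 10, limit 1200, batchSize 500 yields three queries, then the stream ends:
nextWindow(10, 1200, 0, 500);    // { offset: 10, limit: 500 }
nextWindow(10, 1200, 500, 500);  // { offset: 510, limit: 500 }
nextWindow(10, 1200, 1000, 500); // { offset: 1010, limit: 200 }
nextWindow(10, 1200, 1200, 500); // null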

dist/query/helpers/streams/readable.js.map (file diff suppressed because one or more lines are too long)

dist/query/helpers/streams/readable.mjs

@@ -0,0 +1,129 @@
import { Readable } from 'stream';
import { isFinite } from 'lodash/fp';
import applyPopulate from '../populate/apply.mjs';
import { fromRow } from '../transform.mjs';
const knexQueryDone = Symbol('knexQueryDone');
const knexPerformingQuery = Symbol('knexPerformingQuery');
class ReadableStrapiQuery extends Readable {
_destroy(err, cb) {
// If the stream is destroyed while a query is being made, wait for the
// knexQueryDone event to be emitted before actually destroying the stream
if (this[knexPerformingQuery]) {
this.once(knexQueryDone, (er)=>cb(err || er));
} else {
cb(err);
}
}
/**
* Custom ._read() implementation
*
* NOTE: Here "size" means the number of entities to read from the database,
* not the byte size, since a byte size would require returning partial entities.
*/
async _read(size) {
const query = this._query;
// Remove the original offset & limit properties from the query
// They are re-applied below with updated values; clearing them first is just a safety measure
query.clear('limit').clear('offset');
// Define the maximum read size based on the limit and the requested size
// NOTE: size is equal to _batchSize by default. Since we want to allow customizing it on
// the fly, we need to use its value instead of batchSize when computing the maxReadSize value
// If no limit is defined in the query, use the given size;
// otherwise, use the smaller of the two
const maxReadSize = this._limit === null ? size : Math.min(size, this._limit);
// Compute the limit for the next query: if a limit is defined and reading
// `maxReadSize` entities would overshoot it (> _limit), only read what remains
const limit = this._limit !== null && this._fetched + maxReadSize > this._limit
? this._limit - this._fetched
: maxReadSize;
// If we don't have anything left to read (_limit === _fetched),
// don't bother making the query and end the stream by pushing null
if (limit <= 0) {
this.push(null);
return;
}
// Compute the offset (base offset + number of entities already fetched)
const offset = this._offset + this._fetched;
// Update the query with the new values (offset + limit)
query.offset(offset).limit(limit);
// Lock the ._destroy()
this[knexPerformingQuery] = true;
let results;
let count;
let err;
try {
// Execute the query and store the results & count
results = await query;
const { populate } = this._qb.state;
// Apply the populate if needed
if (populate) {
await applyPopulate(results, populate, {
qb: this._qb,
uid: this._uid,
db: this._db
});
}
// Map results if asked to
if (this._mapResults) {
results = fromRow(this._meta, results);
}
count = results.length;
} catch (e) {
err = e;
}
// Unlock the ._destroy()
this[knexPerformingQuery] = false;
// Tell ._destroy() that it's now safe to close the db connection
if (this.destroyed) {
this.emit(knexQueryDone);
return;
}
// If there is an error, destroy with the given error
if (err) {
this.destroy(err);
return;
}
// Update the amount of fetched entities
this._fetched += count;
// Push each fetched entity into the stream
for (const result of results){
this.push(result);
}
// If we've reached the limit, or the last batch returned fewer
// entities than the batch size, push null to end the stream
if (this._fetched === this._limit || count < this._batchSize) {
this.push(null);
}
}
constructor({ qb, db, uid, mapResults = true, batchSize = 500 }){
super({
objectMode: true,
highWaterMark: batchSize
});
// Extract offset & limit from the query-builder's state
const { offset, limit } = qb.state;
// Original offset value
this._offset = isFinite(offset) ? Number(offset) : 0;
// Maximum number of entities to fetch; null when no limit is defined
this._limit = isFinite(limit) ? Number(limit) : null;
// Total number of entities fetched so far
this._fetched = 0;
// Original Knex query
this._query = qb.getKnexQuery();
// Query Builder instance
this._qb = qb;
// Database related properties
this._db = db;
this._uid = uid;
this._meta = db.metadata.get(uid);
// Stream params
this._batchSize = batchSize;
this._mapResults = mapResults;
// States
this[knexPerformingQuery] = false;
}
}
export { ReadableStrapiQuery as default };
//# sourceMappingURL=readable.mjs.map
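
A note on the knexPerformingQuery lock used by both builds: breaking out of a for await...of loop destroys the stream, and _destroy above defers its callback until the pending Knex query emits knexQueryDone, so the connection is not torn down mid-query. A hedged sketch of an early-exiting consumer; firstN is invented for illustration:

import ReadableQuery from './readable.mjs';

async function firstN(stream: InstanceType<typeof ReadableQuery>, n: number) {
  const results: unknown[] = [];
  // Exiting the loop early calls stream.destroy() under the hood;
  // _destroy then waits for knexQueryDone if a query is still in flight.
  for await (const entity of stream) {
    results.push(entity);
    if (results.length >= n) break;
  }
  return results;
}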

dist/query/helpers/streams/readable.mjs.map (file diff suppressed because one or more lines are too long)