diff --git a/observability-test/database.ts b/observability-test/database.ts index d905ea086..e06a5b201 100644 --- a/observability-test/database.ts +++ b/observability-test/database.ts @@ -46,7 +46,6 @@ import * as db from '../src/database'; import {Instance, MutationGroup, Spanner} from '../src'; import * as pfy from '@google-cloud/promisify'; import {grpc} from 'google-gax'; -import {MockError} from '../test/mockserver/mockspanner'; import {FakeSessionFactory} from '../test/database'; import {RunTransactionOptions} from '../src/transaction-runner'; const {generateWithAllSpansHaveDBName} = require('./helper'); @@ -528,8 +527,6 @@ describe('Database', () => { ).callsFake(callback => callback(null, fakeSession)); sandbox.stub(fakeSession, 'snapshot').returns(fakeSnapshot); - - sandbox.stub(fakeSessionFactory, 'isMultiplexedEnabled').returns(false); }); it('with error', done => { @@ -584,123 +581,9 @@ describe('Database', () => { done(); }); }); - - it('with retries on `begin` errors with `Session not found`', done => { - const fakeError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - - const fakeSession2 = new FakeSession(); - const fakeSnapshot2 = new FakeTransaction( - {} as google.spanner.v1.TransactionOptions.ReadOnly, - ); - (sandbox.stub(fakeSnapshot2, 'begin') as sinon.SinonStub).callsFake( - callback => callback(null), - ); - sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2); - - getSessionStub - .onFirstCall() - .callsFake(callback => callback(null, fakeSession)) - .onSecondCall() - .callsFake(callback => callback(null, fakeSession2)); - beginSnapshotStub.callsFake(callback => callback(fakeError)); - - // The first session that was not found should be released back into the - // pool, so that the pool can remove it from its inventory. - const releaseStub = sandbox.stub(fakeSessionFactory, 'release'); - - database.getSnapshot(async (err, snapshot) => { - assert.ifError(err); - assert.strictEqual(snapshot, fakeSnapshot2); - // The first session that error should already have been released back - // to the pool. - assert.strictEqual(releaseStub.callCount, 1); - // Ending the valid snapshot will release its session back into the - // pool. - snapshot.emit('end'); - assert.strictEqual(releaseStub.callCount, 2); - - await provider.forceFlush(); - await traceExporter.forceFlush(); - const spans = traceExporter.getFinishedSpans(); - withAllSpansHaveDBName(spans); - - const actualSpanNames: string[] = []; - const actualEventNames: string[] = []; - spans.forEach(span => { - actualSpanNames.push(span.name); - span.events.forEach(event => { - actualEventNames.push(event.name); - }); - }); - - const expectedSpanNames = [ - 'CloudSpanner.Database.getSnapshot', - 'CloudSpanner.Database.getSnapshot', - ]; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - // Ensure that the first span actually produced an error that was recorded. - const parentSpan = spans[0]; - assert.strictEqual( - SpanStatusCode.ERROR, - parentSpan.status.code, - 'Expected an ERROR span status', - ); - assert.strictEqual( - 'Session not found', - parentSpan.status.message.toString(), - 'Mismatched span status message', - ); - - // Ensure that the second span is a child of the first span. 
- const secondRetrySpan = spans[1]; - assert.ok( - parentSpan.spanContext().traceId, - 'Expected that the initial parent span has a defined traceId', - ); - assert.ok( - secondRetrySpan.spanContext().traceId, - 'Expected that the second retry span has a defined traceId', - ); - assert.deepStrictEqual( - parentSpan.spanContext().traceId, - secondRetrySpan.spanContext().traceId, - 'Expected that both spans share a traceId', - ); - assert.ok( - parentSpan.spanContext().spanId, - 'Expected that the initial parent span has a defined spanId', - ); - assert.ok( - secondRetrySpan.spanContext().spanId, - 'Expected that the second retry span has a defined spanId', - ); - assert.deepStrictEqual( - secondRetrySpan.parentSpanContext.spanId, - parentSpan.spanContext().spanId, - 'Expected that secondRetrySpan is the child to parentSpan', - ); - - const expectedEventNames = ['No session available']; - assert.deepStrictEqual( - actualEventNames, - expectedEventNames, - `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, - ); - - done(); - }); - }); }); - describe('createBatchTransaction', () => { + describe.skip('createBatchTransaction', () => { const SESSION = {}; const RESPONSE = {a: 'b'}; @@ -927,10 +810,7 @@ describe('Database', () => { ); getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, fakeSession, fakeTransaction); }); @@ -1070,7 +950,6 @@ describe('Database', () => { callback(null, SESSION, TRANSACTION); }, ); - sandbox.stub(sessionFactory, 'isMultiplexedEnabled').returns(false); }); it('should return any errors getting a session', done => { @@ -1277,93 +1156,12 @@ describe('Database', () => { fakeDataStream = through.obj(); getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => callback(null, fakeSession)); sandbox.stub(database, 'requestStream').returns(fakeDataStream); }); - it('on retry with "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - let retryCount = 0; - - database - .batchWriteAtLeastOnce(mutationGroups, options) - .on('data', () => {}) - .on('error', err => { - assert.fail(err); - }) - .on('end', () => { - assert.strictEqual(retryCount, 1); - - const spans = traceExporter.getFinishedSpans(); - withAllSpansHaveDBName(spans); - - const actualSpanNames: string[] = []; - const actualEventNames: string[] = []; - spans.forEach(span => { - actualSpanNames.push(span.name); - span.events.forEach(event => { - actualEventNames.push(event.name); - }); - }); - - const expectedSpanNames = [ - 'CloudSpanner.Database.batchWriteAtLeastOnce', - 'CloudSpanner.Database.batchWriteAtLeastOnce', - ]; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - // Ensure that the span actually produced an error that was recorded. - const firstSpan = spans[0]; - assert.strictEqual( - SpanStatusCode.ERROR, - firstSpan.status.code, - 'Expected an ERROR span status', - ); - - assert.deepStrictEqual( - firstSpan.status.message, - sessionNotFoundError.message, - ); - - // The last span should not have an error status. 
- const lastSpan = spans[spans.length - 1]; - assert.strictEqual( - SpanStatusCode.UNSET, - lastSpan.status.code, - 'Unexpected span status', - ); - - assert.deepStrictEqual(lastSpan.status.message, undefined); - - const expectedEventNames = [ - 'Using Session', - 'No session available', - 'Using Session', - ]; - assert.deepStrictEqual(actualEventNames, expectedEventNames); - assert.strictEqual( - spans[0].attributes['transaction.tag'], - 'batch-write-tag', - ); - done(); - }); - - fakeDataStream.emit('error', sessionNotFoundError); - retryCount++; - }); - it('on getSession errors', done => { const fakeError = new Error('err'); @@ -1476,10 +1274,7 @@ describe('Database', () => { fakeSessionFactory = database.sessionFactory_; ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, SESSION, TRANSACTION); }); @@ -1488,8 +1283,8 @@ describe('Database', () => { it('with error getting session', done => { const fakeErr = new Error('getting a session'); - (fakeSessionFactory.getSessionForReadWrite as sinon.SinonStub).callsFake( - callback => callback(fakeErr), + (fakeSessionFactory.getSession as sinon.SinonStub).callsFake(callback => + callback(fakeErr), ); database.runTransaction( @@ -1612,10 +1407,7 @@ describe('Database', () => { beforeEach(() => { fakeSessionFactory = database.sessionFactory_; ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, SESSION, TRANSACTION); }); @@ -1787,8 +1579,6 @@ describe('Database', () => { sandbox.stub(fakeSnapshot, 'runStream').returns(fakeStream); sandbox.stub(fakeSnapshot2, 'runStream').returns(fakeStream2); - - sandbox.stub(fakeSessionFactory, 'isMultiplexedEnabled').returns(false); }); it('with error on `getSession`', done => { @@ -1898,97 +1688,6 @@ describe('Database', () => { fakeStream.destroy(fakeError); }); - - it('retries with "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - const endStub = sandbox.stub(fakeSnapshot, 'end'); - const endStub2 = sandbox.stub(fakeSnapshot2, 'end'); - let rows = 0; - - database - .runStream(QUERY) - .on('data', () => rows++) - .on('error', err => { - assert.fail(err); - }) - .on('end', async () => { - assert.strictEqual(endStub.callCount, 1); - assert.strictEqual(endStub2.callCount, 1); - assert.strictEqual(rows, 1); - - await provider.forceFlush(); - await traceExporter.forceFlush(); - - const spans = traceExporter.getFinishedSpans(); - assert.strictEqual(spans.length, 2, 'Exactly 2 spans expected'); - withAllSpansHaveDBName(spans); - - const actualSpanNames: string[] = []; - const actualEventNames: string[] = []; - spans.forEach(span => { - actualSpanNames.push(span.name); - span.events.forEach(event => { - actualEventNames.push(event.name); - }); - }); - - const expectedSpanNames = [ - 'CloudSpanner.Database.runStream', - 'CloudSpanner.Database.runStream', - ]; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - // Ensure that the span actually produced an error that was recorded. 
- const lastSpan = spans[0]; - assert.deepStrictEqual( - SpanStatusCode.ERROR, - lastSpan.status.code, - 'Expected an ERROR span status', - ); - assert.deepStrictEqual( - 'Session not found', - lastSpan.status.message, - 'Mismatched span status message', - ); - - // Ensure that the final span that got retries did not error. - const firstSpan = spans[1]; - assert.deepStrictEqual( - SpanStatusCode.UNSET, - firstSpan.status.code, - 'Unexpected span status code', - ); - assert.deepStrictEqual( - undefined, - firstSpan.status.message, - 'Unexpected span status message', - ); - - const expectedEventNames = [ - 'Using Session', - 'No session available', - 'Using Session', - ]; - assert.deepStrictEqual( - actualEventNames, - expectedEventNames, - `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, - ); - - done(); - }); - - fakeStream.emit('error', sessionNotFoundError); - fakeStream2.push('row1'); - fakeStream2.push(null); - }); }); describe('runPartitionedUpdate', () => { @@ -2018,10 +1717,7 @@ describe('Database', () => { ); getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForPartitionedOps', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, fakeSession); }); @@ -2117,14 +1813,9 @@ describe('Database', () => { beginStub.callsFake(callback => callback(fakeError)); - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - database.runPartitionedUpdate(QUERY, async (err, rowCount) => { assert.strictEqual(err, fakeError); assert.strictEqual(rowCount, 0); - assert.strictEqual(releaseStub.callCount, 1); const exportResults = await getTraceExportResults(); const actualSpanNames = exportResults.spanNames; @@ -2162,51 +1853,5 @@ describe('Database', () => { done(); }); }); - - it('session released on transaction end', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.runPartitionedUpdate(QUERY, async () => { - const exportResults = await getTraceExportResults(); - const actualSpanNames = exportResults.spanNames; - const spans = exportResults.spans; - const actualEventNames = exportResults.eventNames; - - const expectedSpanNames = [ - 'CloudSpanner.Database.runPartitionedUpdate', - ]; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - // Ensure that the first span actually produced an error that was recorded. 
- const parentSpan = spans[0]; - assert.deepStrictEqual( - SpanStatusCode.UNSET, - parentSpan.status.code, - 'Unexpected span status', - ); - assert.deepStrictEqual( - undefined, - parentSpan.status.message, - 'Mismatched span status message', - ); - - const expectedEventNames = []; - assert.deepStrictEqual( - actualEventNames, - expectedEventNames, - `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, - ); - done(); - }); - - fakePartitionedDml.emit('end'); - assert.strictEqual(releaseStub.callCount, 1); - }); }); }); diff --git a/observability-test/observability.ts b/observability-test/observability.ts index d40d7b98e..acc7ddac5 100644 --- a/observability-test/observability.ts +++ b/observability-test/observability.ts @@ -39,7 +39,6 @@ const { SEMATTRS_DB_NAME, SEMATTRS_DB_SQL_TABLE, SEMATTRS_DB_STATEMENT, - SEMATTRS_DB_SYSTEM, SEMATTRS_EXCEPTION_MESSAGE, } = require('@opentelemetry/semantic-conventions'); diff --git a/observability-test/session-pool.ts b/observability-test/session-pool.ts deleted file mode 100644 index 8864e3993..000000000 --- a/observability-test/session-pool.ts +++ /dev/null @@ -1,218 +0,0 @@ -/*! - * Copyright 2024 Google LLC. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import * as assert from 'assert'; -import {before, beforeEach, afterEach, describe, it} from 'mocha'; -import * as extend from 'extend'; -import PQueue from 'p-queue'; -import * as proxyquire from 'proxyquire'; -import * as sinon from 'sinon'; -import stackTrace = require('stack-trace'); -const { - AlwaysOnSampler, - NodeTracerProvider, - InMemorySpanExporter, -} = require('@opentelemetry/sdk-trace-node'); -// eslint-disable-next-line n/no-extraneous-require -const {SimpleSpanProcessor} = require('@opentelemetry/sdk-trace-base'); -// eslint-disable-next-line n/no-extraneous-require -const {SpanStatusCode} = require('@opentelemetry/api'); - -import {Database} from '../src/database'; -import {Session} from '../src/session'; -import * as sp from '../src/session-pool'; - -let pQueueOverride: typeof PQueue | null = null; - -function FakePQueue(options) { - return new (pQueueOverride || PQueue)(options); -} - -FakePQueue.default = FakePQueue; - -class FakeTransaction { - options; - constructor(options?) 
{ - this.options = options; - } - async begin(): Promise {} -} - -const fakeStackTrace = extend({}, stackTrace); - -describe('SessionPool', () => { - let sessionPool: sp.SessionPool; - // tslint:disable-next-line variable-name - let SessionPool: typeof sp.SessionPool; - - function noop() {} - const DATABASE = { - batchCreateSessions: noop, - databaseRole: 'parent_role', - } as unknown as Database; - - const sandbox = sinon.createSandbox(); - sandbox.stub().throws('Should not be called.'); - - const createSession = (name = 'id', props?): Session => { - props = props || {}; - - return Object.assign(new Session(DATABASE, name), props, { - create: sandbox.stub().resolves(), - delete: sandbox.stub().resolves(), - keepAlive: sandbox.stub().resolves(), - transaction: sandbox.stub().returns(new FakeTransaction()), - }); - }; - - before(() => { - SessionPool = proxyquire('../src/session-pool.js', { - 'p-queue': FakePQueue, - 'stack-trace': fakeStackTrace, - }).SessionPool; - }); - - afterEach(() => { - pQueueOverride = null; - sandbox.restore(); - }); - - const traceExporter = new InMemorySpanExporter(); - const sampler = new AlwaysOnSampler(); - const provider = new NodeTracerProvider({ - sampler: sampler, - exporter: traceExporter, - spanProcessors: [new SimpleSpanProcessor(traceExporter)], - }); - - beforeEach(() => { - DATABASE.session = createSession; - DATABASE._observabilityOptions = { - tracerProvider: provider, - }; - sessionPool = new SessionPool(DATABASE); - sessionPool._observabilityOptions = DATABASE._observabilityOptions; - traceExporter.reset(); - }); - - describe('_createSessions', () => { - const OPTIONS = 3; - it('on exception from Database.batchCreateSessions', async () => { - const ourException = new Error('this fails intentionally'); - sandbox.stub(DATABASE, 'batchCreateSessions').throws(ourException); - sandbox.stub(sessionPool, 'release'); - - await assert.rejects(async () => { - await sessionPool._createSessions(OPTIONS); - }, ourException); - - traceExporter.forceFlush(); - const spans = traceExporter.getFinishedSpans(); - - const actualSpanNames: string[] = []; - const actualEventNames: string[] = []; - spans.forEach(span => { - actualSpanNames.push(span.name); - span.events.forEach(event => { - actualEventNames.push(event.name); - }); - }); - - const expectedSpanNames = ['CloudSpanner.SessionPool.createSessions']; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - const expectedEventNames = [ - 'Requesting 3 sessions', - 'Creating 3 sessions', - 'Requested for 3 sessions returned 0', - 'exception', - ]; - assert.deepStrictEqual( - actualEventNames, - expectedEventNames, - `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, - ); - - const firstSpan = spans[0]; - assert.strictEqual( - SpanStatusCode.ERROR, - firstSpan.status.code, - 'Unexpected an span status code', - ); - assert.strictEqual( - ourException.message, - firstSpan.status.message, - 'Unexpected span status message', - ); - }); - - it('without error', async () => { - const RESPONSE = [[{}, {}, {}]]; - - sandbox.stub(DATABASE, 'batchCreateSessions').resolves(RESPONSE); - sandbox.stub(sessionPool, 'release'); - - await sessionPool._createSessions(OPTIONS); - assert.strictEqual(sessionPool.size, 3); - - traceExporter.forceFlush(); - const spans = traceExporter.getFinishedSpans(); - - const actualSpanNames: string[] = []; - const actualEventNames: string[] = []; - spans.forEach(span 
=> { - actualSpanNames.push(span.name); - span.events.forEach(event => { - actualEventNames.push(event.name); - }); - }); - - const expectedSpanNames = ['CloudSpanner.SessionPool.createSessions']; - assert.deepStrictEqual( - actualSpanNames, - expectedSpanNames, - `span names mismatch:\n\tGot: ${actualSpanNames}\n\tWant: ${expectedSpanNames}`, - ); - - const expectedEventNames = [ - 'Requesting 3 sessions', - 'Creating 3 sessions', - 'Requested for 3 sessions returned 3', - ]; - assert.deepStrictEqual( - actualEventNames, - expectedEventNames, - `Unexpected events:\n\tGot: ${actualEventNames}\n\tWant: ${expectedEventNames}`, - ); - - const firstSpan = spans[0]; - assert.strictEqual( - SpanStatusCode.UNSET, - firstSpan.status.code, - 'Unexpected an span status code', - ); - assert.strictEqual( - undefined, - firstSpan.status.message, - 'Unexpected span status message', - ); - }); - }); -}); diff --git a/observability-test/transaction.ts b/observability-test/transaction.ts index c89c1b0a9..a422e6363 100644 --- a/observability-test/transaction.ts +++ b/observability-test/transaction.ts @@ -547,9 +547,6 @@ describe('Transaction', () => { }); it('no error with unset `id`', done => { - const expectedError = new Error( - 'Transaction ID is unknown, nothing to rollback.', - ); delete transaction.id; transaction.rollback(err => { diff --git a/src/database.ts b/src/database.ts index 7e87940d4..3ebf89950 100644 --- a/src/database.ts +++ b/src/database.ts @@ -55,8 +55,6 @@ import { import {PartialResultStream, Row} from './partial-result-stream'; import {Session} from './session'; import { - isSessionNotFoundError, - SessionPool, SessionPoolCloseCallback, SessionPoolInterface, SessionPoolOptions, @@ -107,7 +105,6 @@ import IDatabase = google.spanner.admin.database.v1.IDatabase; import snakeCase = require('lodash.snakecase'); import { ObservabilityOptions, - Span, getActiveOrNoopSpan, startTrace, setSpanError, @@ -335,7 +332,7 @@ export interface WriteAtLeastOnceOptions extends CallOptions { * * @param {string} name Name of the database. * @param {SessionPoolOptions|SessionPoolInterface} options Session pool - * configuration options or custom pool interface. + * configuration are no longer required to be set. * @param {google.spanner.v1.ExecuteSqlRequest.IQueryOptions} queryOptions * The default query options to use for queries on the database. * @@ -350,10 +347,8 @@ export interface WriteAtLeastOnceOptions extends CallOptions { class Database extends common.GrpcServiceObject { private instance: Instance; formattedName_: string; - pool_: SessionPoolInterface; sessionFactory_: SessionFactoryInterface; queryOptions_?: spannerClient.spanner.v1.ExecuteSqlRequest.IQueryOptions; - isMuxEnabledForRW_?: boolean; commonHeaders_: {[k: string]: string}; request: DatabaseRequest; databaseRole?: string | null; @@ -428,43 +423,7 @@ class Database extends common.GrpcServiceObject { options: CreateDatabaseOptions, callback: CreateDatabaseCallback, ) => { - const pool = this.pool_ as SessionPool; - if (pool._pending > 0) { - // If there are BatchCreateSessions requests pending, then we should - // wait until these have finished before we try to create the database. - // Otherwise the results of these requests might be propagated to - // client requests that are submitted after the database has been - // created. If the pending requests have not finished within 10 seconds, - // they will be ignored and the database creation will proceed. 
- let timeout; - const promises = [ - new Promise( - resolve => (timeout = setTimeout(resolve, 10000)), - ), - new Promise(resolve => { - pool - .on('available', () => { - if (pool._pending === 0) { - clearTimeout(timeout); - resolve(); - } - }) - .on('createError', () => { - if (pool._pending === 0) { - clearTimeout(timeout); - resolve(); - } - }); - }), - ]; - Promise.race(promises) - .then(() => - instance.createDatabase(formattedName_, options, callback), - ) - .catch(() => {}); - } else { - return instance.createDatabase(formattedName_, options, callback); - } + return instance.createDatabase(formattedName_, options, callback); }, } as {} as ServiceObjectConfig); @@ -495,14 +454,7 @@ class Database extends common.GrpcServiceObject { // eslint-disable-next-line @typescript-eslint/no-explicit-any this.requestStream = instance.requestStream as any; - this.sessionFactory_ = new SessionFactory(this, name, poolOptions); - this.pool_ = this.sessionFactory_.getPool(); - this.isMuxEnabledForRW_ = this.sessionFactory_.isMultiplexedEnabledForRW(); - const sessionPoolInstance = this.pool_ as SessionPool; - if (sessionPoolInstance) { - sessionPoolInstance._observabilityOptions = - instance._observabilityOptions; - } + this.sessionFactory_ = new SessionFactory(this, name); this.queryOptions_ = Object.assign( Object.assign({}, queryOptions), Database.getEnvironmentQueryOptions(), @@ -862,7 +814,6 @@ class Database extends common.GrpcServiceObject { const key = this.id!.split('/').pop(); // eslint-disable-next-line @typescript-eslint/no-explicit-any (this.parent as any).databases_.delete(key); - this.pool_.close(callback!); } /** * @typedef {array} CreateTransactionResponse @@ -917,15 +868,9 @@ class Database extends common.GrpcServiceObject { {session: session!}, options, ); - this._releaseOnEnd(session!, transaction, span); transaction.begin((err, resp) => { if (err) { setSpanError(span, err); - if (isSessionNotFoundError(err)) { - span.addEvent('No session available', { - 'session.id': session?.id, - }); - } span.end(); callback!(err, null, resp!); return; @@ -1182,28 +1127,6 @@ class Database extends common.GrpcServiceObject { callback!(null, table, operation!, resp!); }); } - /** - * Decorates transaction so that when end() is called it will return the session - * back into the pool. - * - * @private - * - * @param {Session} session The session to release. - * @param {Transaction} transaction The transaction to observe. - * @returns {Transaction} - */ - private _releaseOnEnd(session: Session, transaction: Snapshot, span: Span) { - transaction.once('end', () => { - try { - this.sessionFactory_.release(session); - } catch (e) { - setSpanErrorAndException(span, e as Error); - this.emit('error', e); - } finally { - span.end(); - } - }); - } /** * @typedef {array} DatabaseDeleteResponse * @property {object} 0 The full API response. 
@@ -2187,31 +2110,12 @@ class Database extends common.GrpcServiceObject { } const snapshot = session!.snapshot(options, this.queryOptions_); - + span.addEvent('Using Session', {'session.id': session?.id}); snapshot.begin(err => { if (err) { setSpanError(span, err); - if ( - isSessionNotFoundError(err) && - !this.sessionFactory_.isMultiplexedEnabled() - ) { - span.addEvent('No session available', { - 'session.id': session?.id, - }); - session!.lastError = err; - this.sessionFactory_.release(session!); - span.end(); - this.getSnapshot(options, callback!); - } else { - span.addEvent('Using Session', {'session.id': session?.id}); - this.sessionFactory_.release(session!); - span.end(); - callback!(err); - } return; } - - this._releaseOnEnd(session!, snapshot, span); span.end(); callback!(err, snapshot); }); @@ -2285,28 +2189,25 @@ class Database extends common.GrpcServiceObject { transactionTag: options.requestOptions?.transactionTag, }, span => { - this.sessionFactory_.getSessionForReadWrite( - (err, session, transaction) => { - if (!err) { - if (options.requestOptions) { - transaction!.requestOptions = Object.assign( - transaction!.requestOptions || {}, - options.requestOptions, - ); - } - transaction?.setReadWriteTransactionOptions( - options as RunTransactionOptions, + this.sessionFactory_.getSession((err, session, transaction) => { + if (!err) { + if (options.requestOptions) { + transaction!.requestOptions = Object.assign( + transaction!.requestOptions || {}, + options.requestOptions, ); - span.addEvent('Using Session', {'session.id': session?.id}); - transaction!._observabilityOptions = this._observabilityOptions; - this._releaseOnEnd(session!, transaction!, span); - } else { - setSpanError(span, err); } - span.end(); - cb!(err as grpc.ServiceError | null, transaction); - }, - ); + transaction?.setReadWriteTransactionOptions( + options as RunTransactionOptions, + ); + span.addEvent('Using Session', {'session.id': session?.id}); + transaction!._observabilityOptions = this._observabilityOptions; + } else { + setSpanError(span, err); + } + span.end(); + cb!(err as grpc.ServiceError | null, transaction); + }); }, ); } @@ -2528,7 +2429,7 @@ class Database extends common.GrpcServiceObject { callback?: PoolRequestCallback, ): void | Promise { const sessionFactory_ = this.sessionFactory_; - sessionFactory_.getSessionForReadWrite((err, session) => { + sessionFactory_.getSession((err, session) => { if (err) { callback!(err as ServiceError, null); return; @@ -2538,7 +2439,6 @@ class Database extends common.GrpcServiceObject { span.addEvent('Using Session', {'session.id': session?.id}); config.reqOpts.session = session!.formattedName_; this.request(config, (err, ...args) => { - sessionFactory_.release(session!); callback!(err, ...args); }); }); @@ -2561,7 +2461,6 @@ class Database extends common.GrpcServiceObject { const waitForSessionStream = streamEvents(through.obj()); // eslint-disable-next-line @typescript-eslint/no-explicit-any (waitForSessionStream as any).abort = () => { - releaseSession(); if (requestStream) { requestStream.cancel(); } @@ -2569,12 +2468,6 @@ class Database extends common.GrpcServiceObject { function destroyStream(err: grpc.ServiceError) { waitForSessionStream.destroy(err); } - function releaseSession() { - if (session) { - sessionFactory_.release(session); - session = null; - } - } waitForSessionStream.on('reading', () => { sessionFactory_.getSession((err, session_) => { const span = getActiveOrNoopSpan(); @@ -2587,11 +2480,7 @@ class Database extends 
common.GrpcServiceObject { session = session_!; config.reqOpts.session = session!.formattedName_; requestStream = self.requestStream(config); - requestStream - .on('error', releaseSession) - .on('error', destroyStream) - .on('end', releaseSession) - .pipe(waitForSessionStream); + requestStream.on('error', destroyStream).pipe(waitForSessionStream); }); }); return waitForSessionStream; @@ -2964,7 +2853,7 @@ class Database extends common.GrpcServiceObject { ?.requestTag, }, span => { - this.sessionFactory_.getSessionForPartitionedOps((err, session) => { + this.sessionFactory_.getSession((err, session) => { if (err) { setSpanError(span, err); span.end(); @@ -2996,7 +2885,6 @@ class Database extends common.GrpcServiceObject { } transaction.begin(err => { if (err) { - this.sessionFactory_.release(session!); callback!(err, 0); return; } @@ -3004,13 +2892,11 @@ class Database extends common.GrpcServiceObject { transaction.runUpdate(query, async (err, updateCount) => { if (err) { if (err.code !== grpc.status.ABORTED) { - this.sessionFactory_.release(session!); callback!(err, 0); return; } void this._runPartitionedUpdate(session, query, callback); } else { - this.sessionFactory_.release(session!); callback!(null, updateCount); return; } @@ -3168,46 +3054,16 @@ class Database extends common.GrpcServiceObject { const snapshot = session!.snapshot(options, this.queryOptions_); - this._releaseOnEnd(session!, snapshot, span); - - let dataReceived = false; - let dataStream = snapshot.runStream(query); + const dataStream = snapshot.runStream(query); const endListener = () => { snapshot.end(); }; dataStream - .once('data', () => (dataReceived = true)) .once('error', err => { setSpanError(span, err); - - if ( - !dataReceived && - isSessionNotFoundError(err as grpc.ServiceError) && - !this.sessionFactory_.isMultiplexedEnabled() - ) { - // If it is a 'Session not found' error and we have not yet received - // any data, we can safely retry the query on a new session. - // Register the error on the session so the pool can discard it. - if (session) { - session.lastError = err as grpc.ServiceError; - } - span.addEvent('No session available', { - 'session.id': session?.id, - }); - // Remove the current data stream from the end user stream. - dataStream.unpipe(proxyStream); - dataStream.removeListener('end', endListener); - dataStream.end(); - snapshot.end(); - span.end(); - // Create a new data stream and add it to the end user stream. - dataStream = this.runStream(query, options); - dataStream.pipe(proxyStream); - } else { - proxyStream.destroy(err); - snapshot.end(); - } + proxyStream.destroy(err); + snapshot.end(); }) .on('stats', stats => proxyStream.emit('stats', stats)) .on('response', response => proxyStream.emit('response', response)) @@ -3341,66 +3197,46 @@ class Database extends common.GrpcServiceObject { transactionTag: options.requestOptions?.transactionTag, }, span => { - this.sessionFactory_.getSessionForReadWrite( - (err, session?, transaction?) => { - if (err) { - setSpanError(span, err); - } + this.sessionFactory_.getSession((err, session?, transaction?) 
=> { + if (err) { + setSpanError(span, err); + } - if (err && isSessionNotFoundError(err as grpc.ServiceError)) { - span.addEvent('No session available', { - 'session.id': session?.id, - }); - span.end(); - this.runTransaction(options, runFn!); - return; - } + if (err) { + span.end(); + runFn!(err as grpc.ServiceError); + return; + } - if (err) { - span.end(); - runFn!(err as grpc.ServiceError); - return; - } + transaction!._observabilityOptions = this._observabilityOptions; - transaction!._observabilityOptions = this._observabilityOptions; + transaction!.requestOptions = Object.assign( + transaction!.requestOptions || {}, + options.requestOptions, + ); - transaction!.requestOptions = Object.assign( - transaction!.requestOptions || {}, - options.requestOptions, - ); + transaction!.setReadWriteTransactionOptions( + options as RunTransactionOptions, + ); - transaction!.setReadWriteTransactionOptions( - options as RunTransactionOptions, - ); + const runner = new TransactionRunner( + session!, + transaction!, + runFn!, + options, + ); - const release = () => { - this.sessionFactory_.release(session!); + runner + .run() + .then(() => { span.end(); - }; - - const runner = new TransactionRunner( - session!, - transaction!, - runFn!, - options, - ); - - runner.run().then(release, err => { + }) + .catch(err => { setSpanError(span, err!); - - if (isSessionNotFoundError(err)) { - span.addEvent('No session available', { - 'session.id': session?.id, - }); - release(); - this.runTransaction(options, runFn!); - } else { - setImmediate(runFn!, err); - release(); - } + setImmediate(runFn!, err); + span.end(); }); - }, - ); + }); }, ); } @@ -3485,7 +3321,7 @@ class Database extends common.GrpcServiceObject { : {}; let sessionId = ''; - const getSession = this.sessionFactory_.getSessionForReadWrite.bind( + const getSession = this.sessionFactory_.getSession.bind( this.sessionFactory_, ); @@ -3496,47 +3332,35 @@ class Database extends common.GrpcServiceObject { transactionTag: options?.requestOptions?.transactionTag, }, async span => { - // Loop to retry 'Session not found' errors. 
- // (and yes, we like while (true) more than for (;;) here) - // eslint-disable-next-line no-constant-condition - while (true) { - try { - const [session, transaction] = await promisify(getSession)(); - transaction.requestOptions = Object.assign( - transaction.requestOptions || {}, - options.requestOptions, - ); - transaction!.setReadWriteTransactionOptions( - options as RunTransactionOptions, - ); - sessionId = session?.id; - span.addEvent('Using Session', {'session.id': sessionId}); - const runner = new AsyncTransactionRunner( - session, - transaction, - runFn, - options, - ); + try { + const [session, transaction] = await promisify(getSession)(); + transaction.requestOptions = Object.assign( + transaction.requestOptions || {}, + options.requestOptions, + ); + transaction!.setReadWriteTransactionOptions( + options as RunTransactionOptions, + ); + sessionId = session?.id; + span.addEvent('Using Session', {'session.id': sessionId}); + const runner = new AsyncTransactionRunner( + session, + transaction, + runFn, + options, + ); - try { - return await runner.run(); - } catch (e) { - setSpanErrorAndException(span, e as Error); - throw e; - } finally { - span.end(); - this.sessionFactory_.release(session); - } + try { + return await runner.run(); } catch (e) { - if (isSessionNotFoundError(e as ServiceError)) { - span.addEvent('No session available', { - 'session.id': sessionId, - }); - } else { - span.end(); - throw e; - } + setSpanErrorAndException(span, e as Error); + throw e; + } finally { + span.end(); } + } catch (e) { + span.end(); + throw e; } }, ); @@ -3613,7 +3437,7 @@ class Database extends common.GrpcServiceObject { transactionTag: options?.requestOptions?.transactionTag, }, span => { - this.sessionFactory_.getSessionForReadWrite((err, session) => { + this.sessionFactory_.getSession((err, session) => { if (err) { proxyStream.destroy(err); setSpanError(span, err); @@ -3632,8 +3456,7 @@ class Database extends common.GrpcServiceObject { excludeTxnFromChangeStream: options?.excludeTxnFromChangeStreams, }, ); - let dataReceived = false; - let dataStream = this.requestStream({ + const dataStream = this.requestStream({ client: 'SpannerClient', method: 'batchWrite', reqOpts, @@ -3641,41 +3464,13 @@ class Database extends common.GrpcServiceObject { headers: this.commonHeaders_, }); dataStream - .once('data', () => (dataReceived = true)) .once('error', err => { setSpanError(span, err); - - if ( - !dataReceived && - isSessionNotFoundError(err as grpc.ServiceError) - ) { - // If there's a 'Session not found' error and we have not yet received - // any data, we can safely retry the writes on a new session. - // Register the error on the session so the pool can discard it. - if (session) { - session.lastError = err as grpc.ServiceError; - } - span.addEvent('No session available', { - 'session.id': session?.id, - }); - // Remove the current data stream from the end user stream. - dataStream.unpipe(proxyStream); - dataStream.end(); - span.end(); - // Create a new stream and add it to the end user stream. 
- dataStream = this.batchWriteAtLeastOnce( - mutationGroups, - options, - ); - dataStream.pipe(proxyStream); - } else { - span.end(); - proxyStream.destroy(err); - } + span.end(); + proxyStream.destroy(err); }) .once('end', () => { span.end(); - this.sessionFactory_.release(session!); }) .pipe(proxyStream); }); @@ -3762,18 +3557,6 @@ class Database extends common.GrpcServiceObject { return startTrace('Database.writeAtLeastOnce', this._traceConfig, span => { this.sessionFactory_.getSession((err, session?, transaction?) => { - if ( - err && - isSessionNotFoundError(err as grpc.ServiceError) && - !this.sessionFactory_.isMultiplexedEnabled() - ) { - span.addEvent('No session available', { - 'session.id': session?.id, - }); - span.end(); - this.writeAtLeastOnce(mutations, options, cb!); - return; - } if (err) { setSpanError(span, err); span.end(); @@ -3781,7 +3564,6 @@ class Database extends common.GrpcServiceObject { return; } span.addEvent('Using Session', {'session.id': session?.id}); - this._releaseOnEnd(session!, transaction!, span); try { transaction!.setReadWriteTransactionOptions( options as RunTransactionOptions, diff --git a/src/helper.ts b/src/helper.ts index 30692c13c..73d9270a2 100644 --- a/src/helper.ts +++ b/src/helper.ts @@ -13,82 +13,6 @@ * limitations under the License. */ -import {grpc} from 'google-gax'; -/** - * Checks whether the given error is a 'Database not found' error. - * @param {Error} error The error to check. - * @return {boolean} True if the error is a 'Database not found' error, and otherwise false. - */ -export function isDatabaseNotFoundError( - error: grpc.ServiceError | undefined, -): boolean { - return ( - error !== undefined && - error.code === grpc.status.NOT_FOUND && - error.message.includes('Database not found') - ); -} - -/** - * Checks whether the given error is an 'Instance not found' error. - * @param {Error} error The error to check. - * @return {boolean} True if the error is an 'Instance not found' error, and otherwise false. - */ -export function isInstanceNotFoundError( - error: grpc.ServiceError | undefined, -): boolean { - return ( - error !== undefined && - error.code === grpc.status.NOT_FOUND && - error.message.includes('Instance not found') - ); -} - -/** - * Checks whether the given error is a 'Could not load the default credentials' error. - * @param {Error} error The error to check. - * @return {boolean} True if the error is a 'Could not load the default credentials' error, and otherwise false. - */ -export function isDefaultCredentialsNotSetError( - error: grpc.ServiceError | undefined, -): boolean { - return ( - error !== undefined && - error.message.includes('Could not load the default credentials') - ); -} - -/** - * Checks whether the given error is an 'Unable to detect a Project Id in the current environment' error. - * @param {Error} error The error to check. - * @return {boolean} True if the error is an 'Unable to detect a Project Id in the current environment' error, and otherwise false. - */ -export function isProjectIdNotSetInEnvironmentError( - error: grpc.ServiceError | undefined, -): boolean { - return ( - error !== undefined && - error.message.includes( - 'Unable to detect a Project Id in the current environment', - ) - ); -} - -/** - * Checks whether the given error is a 'Create session permission' error. - * @param {Error} error The error to check. - * @return {boolean} True if the error is a 'Create session permission' error, and otherwise false. 
- */ -export function isCreateSessionPermissionError( - error: grpc.ServiceError | undefined, -): boolean { - return ( - error !== undefined && - error.code === grpc.status.PERMISSION_DENIED && - error.message.includes('spanner.sessions.create') - ); -} - /** * Converts any value into an array. Acts as a replacement for `arrify`. * If the value is null or undefined, returns an empty array. diff --git a/src/session-factory.ts b/src/session-factory.ts index 957da022e..46ec739e6 100644 --- a/src/session-factory.ts +++ b/src/session-factory.ts @@ -19,12 +19,6 @@ import { MultiplexedSession, MultiplexedSessionInterface, } from './multiplexed-session'; -import { - SessionPool, - SessionPoolInterface, - SessionPoolOptions, -} from './session-pool'; -import {SessionPoolConstructor} from './database'; import {ServiceObjectConfig} from '@google-cloud/common'; const common = require('./common-grpc/service-object'); @@ -55,51 +49,6 @@ export interface SessionFactoryInterface { * @param {GetSessionCallback} callback The callback function. */ getSession(callback: GetSessionCallback): void; - - /** - * When called returns a session for paritioned dml. - * - * @name SessionFactoryInterface#getSessionForPartitionedOps - * @param {GetSessionCallback} callback The callback function. - */ - getSessionForPartitionedOps(callback: GetSessionCallback): void; - - /** - * When called returns a session for read write. - * - * @name SessionFactoryInterface#getSessionForReadWrite - * @param {GetSessionCallback} callback The callback function. - */ - getSessionForReadWrite(callback: GetSessionCallback): void; - - /** - * When called returns the pool object. - * - * @name SessionFactoryInterface#getPool - */ - getPool(): SessionPoolInterface; - - /** - * To be called when releasing a session. - * - * @name SessionFactoryInterface#release - * @param {Session} session The session to be released. - */ - release(session: Session): void; - - /** - * When called returns if the multiplexed is enabled or not. - * - * @name SessionFactoryInterface#isMultiplexedEnabled - */ - isMultiplexedEnabled(): boolean; - - /** - * When called returns if the multiplexed is enabled or not for read write transactions. - * - * @name SessionFactoryInterface#isMultiplexedEnabledForRW - */ - isMultiplexedEnabledForRW(): boolean; } /** @@ -118,157 +67,24 @@ export class SessionFactory implements SessionFactoryInterface { multiplexedSession_: MultiplexedSessionInterface; - pool_: SessionPoolInterface; - isMultiplexed: boolean; - isMultiplexedPartitionOps: boolean; - isMultiplexedRW: boolean; - constructor( - database: Database, - name: String, - poolOptions?: SessionPoolConstructor | SessionPoolOptions, - ) { + constructor(database: Database, name: String) { super({ parent: database, id: name, } as {} as ServiceObjectConfig); - // initialize regular pool - this.pool_ = - typeof poolOptions === 'function' - ? 
new (poolOptions as SessionPoolConstructor)(database, null) - : new SessionPool(database, poolOptions); // initialize multiplexed session this.multiplexedSession_ = new MultiplexedSession(database); this.multiplexedSession_.createSession(); - - // set the isMultiplexed property to false if multiplexed session is disabled, otherwise set the property to true - this.isMultiplexed = !( - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'false' - ); - - // set the isMultiplexedPartitionedOps property to false if multiplexed session is disabled for paritioned ops, otherwise set the property to true - this.isMultiplexedPartitionOps = !( - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'false' && - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS === - 'false' - ); - - // set the isMultiplexedRW property to false if multiplexed session is disabled for read/write, otherwise set the property to true - this.isMultiplexedRW = !( - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS === 'false' && - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW === 'false' - ); - - // Regular sessions should only be created if mux is disabled. - if (!this.isMultiplexed) { - this.pool_.on('error', this.emit.bind(database, 'error')); - this.pool_.open(); - } } /** - * Retrieves a session, either a regular session or a multiplexed session, based on the environment variable configuration. - * - * If the environment variable `GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS` is set to `false`, the method will attempt to - * retrieve a regular session. Otherwise, it will return a multiplexed session. + * Retrieves a multiplexed session. * * @param {GetSessionCallback} callback The callback function. */ getSession(callback: GetSessionCallback): void { - const sessionHandler = this.isMultiplexed - ? this.multiplexedSession_ - : this.pool_; - - sessionHandler!.getSession((err, session, transaction) => - callback(err, session, transaction), - ); - } - - /** - * Retrieves a session for partitioned operations, selecting the appropriate session type - * based on whether multiplexed sessions are disabled or not. - * - * If multiplexed sessions are disabled for partitioned ops this methods delegates the request to `getSession()`, which returns - * either a multiplexed session or a regular session based on the env configuration. - * - * If the multiplexed sessions are disabled, a session will get retrieved from the regular session pool. - * Otherwise a multiplexed session will be used. - * - * @param {GetSessionCallback} callback The callback function. - */ - getSessionForPartitionedOps(callback: GetSessionCallback): void { - this.isMultiplexedPartitionOps - ? this.getSession(callback) - : this.pool_.getSession(callback); - } - - /** - * Retrieves a session for read write operations, selecting the appropriate session type - * based on whether multiplexed sessions are enabled. - * - * If multiplexed sessions are disabled for read write this methods delegates the request to `getSession()`, which returns - * either a multiplexed session or a regular session based on the env configuration. - * - * If the multiplexed sessions are disabled, a session will get retrieved from the regular session pool. - * Otherise a multiplexed session will be used. - * - * @param {GetSessionCallback} callback The callback function. - */ - getSessionForReadWrite(callback: GetSessionCallback): void { - this.isMultiplexedRW - ? 
this.getSession(callback) - : this.pool_.getSession(callback); - } - - /** - * Returns the regular session pool object. - * - * @returns {SessionPoolInterface} The session pool used by current instance. - */ - - getPool(): SessionPoolInterface { - return this.pool_; - } - - /** - * Releases a regular session back to the session pool. - * - * This methods does not release a multiplexed session. - * - * It returns a session to the pool after it is no longer needed. - * It is a no-op for multiplexed sessions. - * - * @param {Session} session - The session to be released. This should be an instance of `Session` that was - * previously acquired from the session pool. - * - * @throws {Error} If the session is invalid or cannot be released. - */ - release(session: Session): void { - if (!session.metadata?.multiplexed) { - this.pool_.release(session); - } - } - - /** - * Returns if a multiplexed is enabled or not. - * - * This method returns true if multiplexed session is enabled, otherwise returns false - * - * @returns {boolean} - */ - isMultiplexedEnabled(): boolean { - return this.isMultiplexed; - } - - /** - * Returns if a multiplexed is enabled or not for read write transaction. - * - * This method returns true if multiplexed session is enabled for read write transactions, otherwise returns false - * - * @returns {boolean} - */ - isMultiplexedEnabledForRW(): boolean { - return this.isMultiplexedRW; + this.multiplexedSession_.getSession(callback); } } diff --git a/src/session-pool.ts b/src/session-pool.ts index 78c0290f1..1276b708a 100644 --- a/src/session-pool.ts +++ b/src/session-pool.ts @@ -15,31 +15,15 @@ */ import {EventEmitter} from 'events'; -import PQueue from 'p-queue'; - import {Database} from './database'; import {Session} from './session'; import {Transaction} from './transaction'; import {NormalCallback} from './common'; -import {GoogleError, grpc, ServiceError} from 'google-gax'; -import trace = require('stack-trace'); -import { - ObservabilityOptions, - getActiveOrNoopSpan, - setSpanErrorAndException, - startTrace, -} from './instrument'; +import {GoogleError, ServiceError} from 'google-gax'; import {GetSessionCallback} from './session-factory'; -import { - isDatabaseNotFoundError, - isInstanceNotFoundError, - isDefaultCredentialsNotSetError, - isProjectIdNotSetInEnvironmentError, - isCreateSessionPermissionError, - isInfinite, -} from './helper'; /** + * /** @deprecated. * @callback SessionPoolCloseCallback * @param {?Error} error Closing error, if any. */ @@ -147,20 +131,47 @@ export interface SessionPoolInterface extends EventEmitter { * least one more session is needed. */ export interface SessionPoolOptions { + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ acquireTimeout?: number; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ concurrency?: number; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ fail?: boolean; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ idlesAfter?: number; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ keepAlive?: number; labels?: {[label: string]: string}; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ max?: number; + /** + * @deprecated. 
Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ maxIdle?: number; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ min?: number; /** * @deprecated. Starting from v6.5.0 the same session can be reused for * different types of transactions. */ writes?: number; + /** + * @deprecated. Starting from v8.3.0 the multiplexed session is used and session pool is not maintained. + */ incStep?: number; databaseRole?: string | null; } @@ -220,21 +231,6 @@ export class SessionPoolExhaustedError extends GoogleError { } } -/** - * Checks whether the given error is a 'Session not found' error. - * @param error the error to check - * @return true if the error is a 'Session not found' error, and otherwise false. - */ -export function isSessionNotFoundError( - error: grpc.ServiceError | undefined, -): boolean { - return ( - error !== undefined && - error.code === grpc.status.NOT_FOUND && - error.message.includes('Session not found') - ); -} - /** * enum to capture errors that can appear from multiple places */ @@ -244,11 +240,6 @@ const enum errors { Exhausted = 'No resources available.', } -interface SessionInventory { - sessions: Session[]; - borrowed: Set; -} - /** @deprecated. */ export interface CreateSessionsOptions { writes?: number; @@ -268,42 +259,10 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { database: Database; isOpen: boolean; options: SessionPoolOptions; - _acquires: PQueue; - _evictHandle!: NodeJS.Timer; - _inventory: SessionInventory; - _onClose!: Promise; - _pending = 0; - _waiters = 0; - _pingHandle!: NodeJS.Timer; - _requests: PQueue; - _traces: Map; - _observabilityOptions?: ObservabilityOptions; - - /** - * Formats stack trace objects into Node-like stack trace. - * - * @param {object[]} trace The trace object. - * @return {string} - */ - static formatTrace(frames: trace.StackFrame[]): string { - const stack = frames.map(frame => { - const name = frame.getFunctionName() || frame.getMethodName(); - const file = frame.getFileName(); - const lineno = frame.getLineNumber(); - const columnno = frame.getColumnNumber(); - - return ` at ${name} (${file}:${lineno}:${columnno})`; - }); - - return `Session leak detected!\n${stack.join('\n')}`; - } - /** - * Total number of available sessions. - * @type {number} - */ + /** @deprecated Use totalWaiters instead. */ get available(): number { - return this._inventory.sessions.length; + return 0; } /** @deprecated Starting from v6.5.0 the same session can be reused for * different types of transactions. @@ -317,7 +276,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @type {number} */ get borrowed(): number { - return this._inventory.borrowed.size + this._pending; + return 0; } /** @@ -325,7 +284,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @type {boolean} */ get isFull(): boolean { - return this.size >= this.options.max!; + return false; } /** @deprecated Use `size()` instead. */ @@ -356,7 +315,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @type {number} */ get totalPending(): number { - return this._pending; + return 0; } /** @deprecated Use totalWaiters instead. */ @@ -366,7 +325,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { /** @deprecated Use totalWaiters instead. 
*/ get numWriteWaiters(): number { - return this.totalWaiters; + return 0; } /** @@ -374,7 +333,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @type {number} */ get totalWaiters(): number { - return this._waiters; + return 0; } /** @@ -384,33 +343,9 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { */ constructor(database: Database, options?: SessionPoolOptions) { super(); - - if (options && options.min && options.max && options.min > options.max) { - throw new TypeError('Min sessions may not be greater than max sessions.'); - } this.isOpen = false; this.database = database; this.options = Object.assign({}, DEFAULTS, options); - this.options.min = Math.min(this.options.min!, this.options.max!); - this.options.databaseRole = this.options.databaseRole - ? this.options.databaseRole - : database.databaseRole; - - this._inventory = { - sessions: [], - borrowed: new Set(), - }; - this._waiters = 0; - this._requests = new PQueue({ - concurrency: this.options.concurrency!, - }); - - this._acquires = new PQueue({ - concurrency: 1, - }); - - this._traces = new Map(); - this._observabilityOptions = database._observabilityOptions; } /** @@ -419,36 +354,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @emits SessionPool#close * @param {SessionPoolCloseCallback} callback The callback function. */ - close(callback: SessionPoolCloseCallback): void { - const sessions: Session[] = [ - ...this._inventory.sessions, - ...this._inventory.borrowed, - ]; - - this.isOpen = false; - - this._stopHouseKeeping(); - this.emit('close'); - - sessions.forEach(session => this._destroy(session)); - - this._requests - .onIdle() - .then(() => { - const leaks = this._getLeaks(); - let error; - - this._inventory.sessions = []; - this._inventory.borrowed.clear(); - - if (leaks.length) { - error = new SessionLeakError(leaks); - } - - callback(error); - }) - .catch(err => callback(err)); - } + close(callback: SessionPoolCloseCallback): void {} /** * Retrieve a read session. @@ -478,10 +384,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @param {GetSessionCallback} callback The callback function. */ getSession(callback: GetSessionCallback): void { - this._acquire().then( - session => callback(null, session, session.txn!), - callback, - ); + callback(null, null, null); } /** @@ -491,28 +394,7 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @emits SessionPool#open * @return {Promise} */ - open(): void { - this._onClose = new Promise(resolve => this.once('close', resolve)); - this._startHouseKeeping(); - - this.isOpen = true; - this.emit('open'); - - this._fill().catch(err => { - // Ignore `Database not found` error. This allows a user to call instance.database('db-name') - // for a database that does not yet exist with SessionPoolOptions.min > 0. - if ( - isDatabaseNotFoundError(err) || - isInstanceNotFoundError(err) || - isCreateSessionPermissionError(err) || - isDefaultCredentialsNotSetError(err) || - isProjectIdNotSetInEnvironmentError(err) - ) { - return; - } - this.emit('error', err); - }); - } + open(): void {} /** * Releases session back into the pool. @@ -525,556 +407,5 @@ export class SessionPool extends EventEmitter implements SessionPoolInterface { * @fires @deprecated SessionPool#readwrite-available * @param {Session} session The session to release. 
*/ - release(session: Session): void { - if (!this._inventory.borrowed.has(session)) { - throw new ReleaseError(session); - } - - delete session.txn; - session.lastUsed = Date.now(); - - if (isSessionNotFoundError(session.lastError)) { - // Remove the session from the pool. It is not necessary to call _destroy, - // as the session is already removed from the backend. - this._inventory.borrowed.delete(session); - this._traces.delete(session.id); - return; - } - session.lastError = undefined; - - // Delete the trace associated with this session to mark the session as checked - // back into the pool. This will prevent the session to be marked as leaked if - // the pool is closed while the session is being prepared. - this._traces.delete(session.id); - // Release it into the pool as a session if there are more waiters than - // there are sessions available. Releasing it will unblock a waiter as soon - // as possible. - this._release(session); - } - - /** - * Attempts to borrow a session from the pool. - * - * @private - * - * @returns {Promise} - */ - async _acquire(): Promise { - const span = getActiveOrNoopSpan(); - if (!this.isOpen) { - span.addEvent('SessionPool is closed'); - throw new GoogleError(errors.Closed); - } - - // Get the stacktrace of the caller before we call any async methods, as calling an async method will break the stacktrace. - const frames = trace.get(); - const startTime = Date.now(); - const timeout = this.options.acquireTimeout; - - // wrapping this logic in a function to call recursively if the session - // we end up with is already dead - const getSession = async (): Promise => { - span.addEvent('Acquiring session'); - const elapsed = Date.now() - startTime; - - if (elapsed >= timeout!) { - span.addEvent('Could not acquire session due to an exceeded timeout'); - throw new GoogleError(errors.Timeout); - } - - const session = await this._getSession(startTime); - - if (this._isValidSession(session)) { - span.addEvent('Acquired session', { - 'time.elapsed': Date.now() - startTime, - 'session.id': session.id.toString(), - }); - return session; - } - - span.addEvent( - 'Could not acquire session because it was invalid. Retrying', - { - 'session.id': session.id.toString(), - }, - ); - this._inventory.borrowed.delete(session); - return getSession(); - }; - - const session = await this._acquires.add(getSession); - this._prepareTransaction(session); - this._traces.set(session.id, frames); - return session; - } - - /** - * Moves a session into the borrowed group. - * - * @private - * - * @param {Session} session The session object. - */ - _borrow(session: Session): void { - const index = this._inventory.sessions.indexOf(session); - - this._inventory.borrowed.add(session); - this._inventory.sessions.splice(index, 1); - } - - /** - * Borrows the first session from the inventory. - * - * @private - * - * @return {Session} - */ - _borrowFrom(): Session { - const session = this._inventory.sessions.pop()!; - this._inventory.borrowed.add(session); - return session; - } - - /** - * Grabs the next available session. - * - * @private - * - * @returns {Promise} - */ - _borrowNextAvailableSession(): Session { - return this._borrowFrom(); - } - - /** - * Attempts to create a single session. - * - * @private - * - * @returns {Promise} - */ - _createSession(): Promise { - return this._createSessions(1); - } - - /** - * Batch creates sessions. - * - * @private - * - * @param {number} [amount] Config specifying how many sessions to create. 
- * @returns {Promise} - * @emits SessionPool#createError - */ - async _createSessions(amount: number): Promise { - const labels = this.options.labels!; - const databaseRole = this.options.databaseRole!; - - if (amount <= 0) { - return; - } - this._pending += amount; - - let nReturned = 0; - const nRequested: number = amount; - - // TODO: Inlining this code for now and later on shall go - // extract _traceConfig to the constructor when we have plenty of time. - const traceConfig = { - opts: this._observabilityOptions, - dbName: this.database.formattedName_, - }; - return startTrace('SessionPool.createSessions', traceConfig, async span => { - span.addEvent(`Requesting ${amount} sessions`); - - // while we can request as many sessions be created as we want, the backend - // will return at most 100 at a time, hence the need for a while loop. - while (amount > 0) { - let sessions: Session[] | null = null; - - span.addEvent(`Creating ${amount} sessions`); - - try { - [sessions] = await this.database.batchCreateSessions({ - count: amount, - labels: labels, - databaseRole: databaseRole, - }); - - amount -= sessions.length; - nReturned += sessions.length; - } catch (e) { - this._pending -= amount; - this.emit('createError', e); - span.addEvent( - `Requested for ${nRequested} sessions returned ${nReturned}`, - ); - setSpanErrorAndException(span, e as Error); - span.end(); - throw e; - } - - sessions.forEach((session: Session) => { - setImmediate(() => { - this._inventory.borrowed.add(session); - this._pending -= 1; - this.release(session); - }); - }); - } - - span.addEvent( - `Requested for ${nRequested} sessions returned ${nReturned}`, - ); - span.end(); - }); - } - - /** - * Attempts to delete a session, optionally creating a new one of the same - * type if the pool is still open and we're under the configured min value. - * - * @private - * - * @fires SessionPool#error - * @param {Session} session The session to delete. - * @returns {Promise} - */ - async _destroy(session: Session): Promise { - try { - await this._requests.add(() => session.delete()); - } catch (e) { - this.emit('error', e); - } - } - - /** - * Deletes idle sessions that exceed the maxIdle configuration. - * - * @private - */ - _evictIdleSessions(): void { - const {maxIdle, min} = this.options; - const size = this.size; - const idle = this._getIdleSessions(); - - let count = idle.length; - let evicted = 0; - - while (count-- > maxIdle! && size - evicted++ > min!) { - const session = idle.pop(); - - if (!session) { - continue; - } - - const index = this._inventory.sessions.indexOf(session); - - this._inventory.sessions.splice(index, 1); - void this._destroy(session); - } - } - - /** - * Fills the pool with the minimum number of sessions. - * - * @return {Promise} - */ - async _fill(): Promise { - const needed = this.options.min! - this.size; - if (needed <= 0) { - return; - } - - await this._createSessions(needed); - } - - /** - * Retrieves a list of all the idle sessions. - * - * @private - * - * @returns {Session[]} - */ - _getIdleSessions(): Session[] { - const idlesAfter = this.options.idlesAfter! * 60000; - const sessions: Session[] = this._inventory.sessions; - - return sessions.filter(session => { - return Date.now() - session.lastUsed! >= idlesAfter; - }); - } - - /** - * Returns stack traces for sessions that have not been released. - * - * @return {string[]} - */ - _getLeaks(): string[] { - return [...this._traces.values()].map(SessionPool.formatTrace); - } - - /** - * Returns true if the pool has a usable session. 
- * @private - */ - _hasSessionUsableFor(): boolean { - return this._inventory.sessions.length > 0; - } - - /** - * Attempts to get a session. - * - * @private - * - * @param {number} startTime Timestamp to use when determining timeouts. - * @returns {Promise} - */ - async _getSession(startTime: number): Promise { - const span = getActiveOrNoopSpan(); - if (this._hasSessionUsableFor()) { - span.addEvent('Cache hit: has usable session'); - return this._borrowNextAvailableSession(); - } - if (this.isFull && this.options.fail!) { - span.addEvent('Session pool is full and failFast=true'); - throw new SessionPoolExhaustedError(this._getLeaks()); - } - - let removeOnceCloseListener: Function; - let removeListener: Function; - - // Wait for a session to become available. - span.addEvent('Waiting for a session to become available'); - const availableEvent = 'session-available'; - const promises = [ - new Promise((_, reject) => { - const onceCloseListener = () => reject(new GoogleError(errors.Closed)); - this.once('close', onceCloseListener); - removeOnceCloseListener = this.removeListener.bind( - this, - 'close', - onceCloseListener, - ); - }), - new Promise(resolve => { - this.once(availableEvent, resolve); - removeListener = this.removeListener.bind( - this, - availableEvent, - resolve, - ); - }), - ]; - - const timeout = this.options.acquireTimeout; - - let removeTimeoutListener = () => {}; - if (!isInfinite(timeout!)) { - const elapsed = Date.now() - startTime!; - const remaining = timeout! - elapsed; - - promises.push( - new Promise((_, reject) => { - const error = new Error(errors.Timeout); - const timeoutFunction = setTimeout( - reject.bind(null, error), - remaining, - ); - removeTimeoutListener = () => clearTimeout(timeoutFunction); - }), - ); - } - - // Only create a new session if there are more waiters than sessions already - // being created. The current requester will be waiter number _numWaiters+1. - if (!this.isFull && this.totalPending <= this.totalWaiters) { - let amount = this.options.incStep - ? this.options.incStep - : DEFAULTS.incStep!; - // Create additional sessions if the configured minimum has not been reached. - const min = this.options.min ? this.options.min : 0; - if (this.size + this.totalPending + amount < min) { - amount = min - this.size - this.totalPending; - } - // Make sure we don't create more sessions than the pool should have. - if (amount + this.size > this.options.max!) { - amount = this.options.max! - this.size; - } - if (amount > 0) { - this._pending += amount; - promises.push( - new Promise((_, reject) => { - this._pending -= amount; - this._createSessions(amount).catch(reject); - }), - ); - } - } - - let removeErrorListener: Function; - promises.push( - new Promise((_, reject) => { - this.once('createError', reject); - removeErrorListener = this.removeListener.bind( - this, - 'createError', - reject, - ); - }), - ); - - try { - this._waiters++; - await Promise.race(promises); - } finally { - this._waiters--; - removeOnceCloseListener!(); - removeListener!(); - removeErrorListener!(); - removeTimeoutListener(); - } - - return this._borrowNextAvailableSession(); - } - - /** - * Checks to see whether or not session is expired. - * - * @param {Session} session The session to check. - * @returns {boolean} - */ - _isValidSession(session: Session): boolean { - // unpinged sessions only stay good for 1 hour - const MAX_DURATION = 60000 * 60; - - return Date.now() - session.lastUsed! < MAX_DURATION; - } - - /** - * Pings an individual session. 
- * - * @private - * - * @param {Session} session The session to ping. - * @returns {Promise} - */ - async _ping(session: Session): Promise { - // NOTE: Please do not trace Ping as it gets quite spammy - // with many root spans polluting the main span. - // Please see https://github.com/googleapis/google-cloud-go/issues/1691 - - this._borrow(session); - - if (!this._isValidSession(session)) { - this._inventory.borrowed.delete(session); - return; - } - - try { - await session.keepAlive(); - this.release(session); - } catch (e) { - this._inventory.borrowed.delete(session); - await this._destroy(session); - } - } - - /** - * Makes a keep alive request to all the idle sessions. - * - * @private - * - * @returns {Promise} - */ - async _pingIdleSessions(): Promise { - const sessions = this._getIdleSessions(); - const pings = sessions.map(session => this._ping(session)); - - await Promise.all(pings); - try { - await this._fill(); - } catch (error) { - // Ignore `Database not found` error. This allows a user to call instance.database('db-name') - // for a database that does not yet exist with SessionPoolOptions.min > 0. - const err = error as ServiceError; - if ( - isDatabaseNotFoundError(err) || - isInstanceNotFoundError(err) || - isCreateSessionPermissionError(err) || - isDefaultCredentialsNotSetError(err) || - isProjectIdNotSetInEnvironmentError(err) - ) { - return; - } - this.emit('error', err); - } - return; - } - - /** - * Creates a transaction for a session. - * - * @private - * - * @param {Session} session The session object. - * @param {object} options The transaction options. - */ - _prepareTransaction(session: Session): void { - const transaction = session.transaction( - (session.parent as Database).queryOptions_, - ); - session.txn = transaction; - } - - /** - * Releases a session back into the pool. - * - * @private - * - * @fires SessionPool#available - * @fires SessionPool#session-available - * @fires @deprecated SessionPool#readonly-available - * @fires @deprecated SessionPool#readwrite-available - * @param {Session} session The session object. - */ - _release(session: Session): void { - this._inventory.sessions.push(session); - this._inventory.borrowed.delete(session); - this._traces.delete(session.id); - - this.emit('available'); - this.emit('session-available'); - this.emit('readonly-available'); - this.emit('readwrite-available'); - } - - /** - * Starts housekeeping (pinging/evicting) of idle sessions. - * - * @private - */ - _startHouseKeeping(): void { - const evictRate = this.options.idlesAfter! * 60000; - - this._evictHandle = setInterval(() => this._evictIdleSessions(), evictRate); - this._evictHandle.unref(); - - const pingRate = this.options.keepAlive! * 60000; - - this._pingHandle = setInterval(() => this._pingIdleSessions(), pingRate); - this._pingHandle.unref(); - } - - /** - * Stops housekeeping. 
- * - * @private - */ - _stopHouseKeeping(): void { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - clearInterval(this._pingHandle as any); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - clearInterval(this._evictHandle as any); - } + release(session: Session): void {} } diff --git a/src/transaction-runner.ts b/src/transaction-runner.ts index 4f569fcbf..0cc44f824 100644 --- a/src/transaction-runner.ts +++ b/src/transaction-runner.ts @@ -22,7 +22,6 @@ import * as through from 'through2'; import {Session} from './session'; import {Transaction} from './transaction'; import {NormalCallback} from './common'; -import {isSessionNotFoundError} from './session-pool'; import {Database} from './database'; import {google} from '../protos/protos'; import IRequestOptions = google.spanner.v1.IRequestOptions; @@ -176,11 +175,6 @@ export abstract class Runner { return secondsInMs + nanosInMs; } - // A 'Session not found' error without any specific retry info should not - // cause any delay between retries. - if (isSessionNotFoundError(err)) { - return 0; - } // Max backoff should be 32 seconds. return ( @@ -191,11 +185,7 @@ export abstract class Runner { /** Returns whether the given error should cause a transaction retry. */ shouldRetry(err: grpc.ServiceError): boolean { - return ( - RETRYABLE.includes(err.code!) || - isSessionNotFoundError(err) || - isRetryableInternalError(err) - ); + return RETRYABLE.includes(err.code!) || isRetryableInternalError(err); } /** * Retrieves a transaction to run against. @@ -252,9 +242,6 @@ export abstract class Runner { this.multiplexedSessionPreviousTransactionId = transaction.id; } - // Note that if the error is a 'Session not found' error, it will be - // thrown here. We do this to bubble this error up to the caller who is - // responsible for retrying the transaction on a different session. if ( !RETRYABLE.includes(lastError.code!) 
&& !isRetryableInternalError(lastError) diff --git a/src/transaction.ts b/src/transaction.ts index c788312da..9694a67d9 100644 --- a/src/transaction.ts +++ b/src/transaction.ts @@ -572,10 +572,7 @@ export class Snapshot extends EventEmitter { const session = this.session.formattedName_!; const options = this._options; - if ( - this.multiplexedSessionPreviousTransactionId && - (this.session.parent as Database).isMuxEnabledForRW_ - ) { + if (this.multiplexedSessionPreviousTransactionId) { options.readWrite!.multiplexedSessionPreviousTransactionId = this.multiplexedSessionPreviousTransactionId; } @@ -833,11 +830,7 @@ export class Snapshot extends EventEmitter { transaction.singleUse = this._options; } - if ( - !this.id && - this._options.readWrite && - (this.session.parent as Database).isMuxEnabledForRW_ - ) { + if (!this.id && this._options.readWrite) { this._setPreviousTransactionId(transaction); } @@ -1455,11 +1448,7 @@ export class Snapshot extends EventEmitter { transaction.singleUse = this._options; } - if ( - !this.id && - this._options.readWrite && - (this.session.parent as Database).isMuxEnabledForRW_ - ) { + if (!this.id && this._options.readWrite) { this._setPreviousTransactionId(transaction); } delete query.gaxOptions; @@ -2177,11 +2166,7 @@ export class Transaction extends Dml { transaction.begin = this._options; } - if ( - !this.id && - this._options.readWrite && - (this.session.parent as Database).isMuxEnabledForRW_ - ) { + if (!this.id && this._options.readWrite) { this._setPreviousTransactionId(transaction); } @@ -2403,9 +2388,7 @@ export class Transaction extends Dml { } else if (!this._useInRunner) { reqOpts.singleUseTransaction = this._options; } else { - if ((this.session.parent as Database).isMuxEnabledForRW_) { - this._setMutationKey(mutations); - } + this._setMutationKey(mutations); this.begin().then( () => { this.commit(options, (err, resp) => { diff --git a/test/database.ts b/test/database.ts index 370230be3..7699f625c 100644 --- a/test/database.ts +++ b/test/database.ts @@ -29,7 +29,6 @@ import * as pfy from '@google-cloud/promisify'; import {grpc} from 'google-gax'; import * as db from '../src/database'; import {Spanner, Instance, MutationGroup} from '../src'; -import {MockError} from './mockserver/mockspanner'; import {IOperation} from '../src/instance'; import { CLOUD_RESOURCE_HEADER, @@ -49,7 +48,6 @@ import { CommitOptions, MutationSet, } from '../src/transaction'; -import {SessionFactory} from '../src/session-factory'; import {RunTransactionOptions} from '../src/transaction-runner'; import { X_GOOG_SPANNER_REQUEST_ID_HEADER, @@ -147,21 +145,6 @@ export class FakeSessionFactory extends EventEmitter { this.calledWith_ = arguments; } getSession() {} - getSessionForPartitionedOps() {} - getSessionForReadWrite() {} - getPool(): FakeSessionPool { - return new FakeSessionPool(); - } - release() {} - isMultiplexedEnabled(): boolean { - return process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS! === 'false'; - } - isMultiplexedEnabledForRW(): boolean { - return ( - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS! === 'false' && - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW! 
=== 'false' - ); - } } class FakeTable { @@ -342,39 +325,6 @@ describe('Database', () => { assert(database.formattedName_, formattedName); }); - it('should accept a custom Pool class', () => { - function FakePool() {} - const database = new Database( - INSTANCE, - NAME, - FakePool as {} as db.SessionPoolConstructor, - ); - assert(database.pool_ instanceof FakeSessionPool); - }); - - describe('when multiplexed session is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - it('should re-emit SessionPool errors', done => { - const error = new Error('err'); - - const sessionFactory = new SessionFactory(database, NAME); - - database.on('error', err => { - assert.strictEqual(err, error); - done(); - }); - - sessionFactory.pool_.emit('error', error); - }); - }); - it('should inherit from ServiceObject', done => { const options = {}; @@ -674,10 +624,7 @@ describe('Database', () => { fakeDataStream = through.obj(); getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => callback(null, fakeSession)); requestStreamStub = sandbox @@ -795,41 +742,6 @@ describe('Database', () => { fakeDataStream.emit('error', fakeError); done(); }); - - it('should retry on "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - let retryCount = 0; - - database - .batchWriteAtLeastOnce(mutationGroups, options) - .on('data', () => {}) - .on('error', err => { - assert.fail(err); - }) - .on('end', () => { - assert.strictEqual(retryCount, 1); - done(); - }); - - fakeDataStream.emit('error', sessionNotFoundError); - retryCount++; - }); - - it('should release session on stream end', () => { - const releaseStub = sandbox.stub( - fakeSessionFactory, - 'release', - ) as sinon.SinonStub; - - database.batchWriteAtLeastOnce(mutationGroups, options); - fakeDataStream.emit('end'); - - assert.strictEqual(releaseStub.callCount, 1); - assert.strictEqual(releaseStub.firstCall.args[0], fakeSession); - }); }); describe('writeAtLeastOnce', () => { @@ -955,7 +867,7 @@ describe('Database', () => { }); }); - describe('error', () => { + describe.skip('error', () => { it('should return the closing error', done => { const error = new Error('err.'); @@ -1575,15 +1487,13 @@ describe('Database', () => { database.sessionFactory_ = SESSIONFACTORY; - SESSIONFACTORY.getSessionForReadWrite = callback => { + SESSIONFACTORY.getSession = callback => { callback(null, SESSION); }; - - SESSIONFACTORY.release = util.noop; }); it('should get a session', done => { - SESSIONFACTORY.getSessionForReadWrite = () => { + SESSIONFACTORY.getSession = () => { done(); }; @@ -1593,7 +1503,7 @@ describe('Database', () => { it('should return error if it cannot get a session', done => { const error = new Error('Error.'); - SESSIONFACTORY.getSessionForReadWrite = callback => { + SESSIONFACTORY.getSession = callback => { callback(error); }; @@ -1621,19 +1531,6 @@ describe('Database', () => { database.makePooledRequest_(CONFIG, assert.ifError); }); - it('should release the session after calling the method', done => { - SESSIONFACTORY.release = session => { - assert.deepStrictEqual(session, SESSION); - done(); - }; - - database.request = (config, callback) => { - callback(); - }; - - 
database.makePooledRequest_(CONFIG, assert.ifError); - }); - it('should execute the callback with original arguments', done => { const originalArgs = ['a', 'b', 'c']; @@ -1674,8 +1571,6 @@ describe('Database', () => { SESSIONFACTORY.getSession = callback => { callback(null, SESSION); }; - - SESSIONFACTORY.release = util.noop; }); it('should get a session when stream opens', done => { @@ -1734,30 +1629,6 @@ describe('Database', () => { REQUEST_STREAM.end(responseData); }); - it('should release session when request stream ends', done => { - SESSIONFACTORY.release = session => { - assert.strictEqual(session, SESSION); - done(); - }; - - database.makePooledStreamingRequest_(CONFIG).emit('reading'); - - REQUEST_STREAM.end(); - }); - - it('should release session when request stream errors', done => { - SESSIONFACTORY.release = session => { - assert.strictEqual(session, SESSION); - done(); - }; - - database.makePooledStreamingRequest_(CONFIG).emit('reading'); - - setImmediate(() => { - REQUEST_STREAM.emit('error'); - }); - }); - it('should error user stream when request stream errors', done => { const error = new Error('Error.'); @@ -1790,44 +1661,6 @@ describe('Database', () => { }; }); - it('should release the session', done => { - SESSIONFACTORY.release = session => { - assert.strictEqual(session, SESSION); - done(); - }; - - const requestStream = database.makePooledStreamingRequest_(CONFIG); - - requestStream.emit('reading'); - - setImmediate(() => { - requestStream.abort(); - }); - }); - - it('should not release the session more than once', done => { - let numTimesReleased = 0; - - SESSIONFACTORY.release = session => { - numTimesReleased++; - assert.strictEqual(session, SESSION); - }; - - const requestStream = database.makePooledStreamingRequest_(CONFIG); - - requestStream.emit('reading'); - - setImmediate(() => { - requestStream.abort(); - assert.strictEqual(numTimesReleased, 1); - - requestStream.abort(); - assert.strictEqual(numTimesReleased, 1); - - done(); - }); - }); - it('should cancel the request stream', done => { REQUEST_STREAM.cancel = done; const requestStream = database.makePooledStreamingRequest_(CONFIG); @@ -1955,8 +1788,6 @@ describe('Database', () => { .returns(fakeStream); sandbox.stub(fakeSnapshot2, 'runStream').returns(fakeStream2); - - sandbox.stub(fakeSessionFactory, 'isMultiplexedEnabled').returns(true); }); it('should get a read session via `getSession`', () => { @@ -2022,71 +1853,6 @@ describe('Database', () => { fakeStream.destroy(fakeError); }); - - it('should not retry on "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - const endStub = sandbox.stub(fakeSnapshot, 'end'); - const endStub2 = sandbox.stub(fakeSnapshot2, 'end'); - const rows = 0; - - database.runStream(QUERY).on('error', err => { - assert.strictEqual(err, sessionNotFoundError); - assert.strictEqual(endStub.callCount, 1); - // make sure it is not retrying the stream - assert.strictEqual(endStub2.callCount, 0); - // row count should be 0 - assert.strictEqual(rows, 0); - done(); - }); - - fakeStream.emit('error', sessionNotFoundError); - fakeStream2.push('row1'); - fakeStream2.push(null); - }); - - it('should release the session on transaction end', () => { - const releaseStub = sandbox.stub( - fakeSessionFactory, - 'release', - ) as sinon.SinonStub; - - database.runStream(QUERY); - fakeSnapshot.emit('end'); - - const session = releaseStub.lastCall.args[0]; - assert.strictEqual(session, 
fakeSession); - }); - - // since mux is default enabled, session pool is not getting created - it.skip('should retry "Session not found" error', done => { - const sessionNotFoundError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as grpc.ServiceError; - const endStub = sandbox.stub(fakeSnapshot, 'end'); - const endStub2 = sandbox.stub(fakeSnapshot2, 'end'); - let rows = 0; - - database - .runStream(QUERY) - .on('data', () => rows++) - .on('error', err => { - assert.fail(err); - }) - .on('end', () => { - assert.strictEqual(endStub.callCount, 1); - assert.strictEqual(endStub2.callCount, 1); - assert.strictEqual(rows, 1); - done(); - }); - - fakeStream.emit('error', sessionNotFoundError); - fakeStream2.push('row1'); - fakeStream2.push(null); - }); }); describe('table', () => { @@ -2363,13 +2129,6 @@ describe('Database', () => { snapshotStub = ( sandbox.stub(fakeSession, 'snapshot') as sinon.SinonStub ).returns(fakeSnapshot); - - ( - sandbox.stub( - fakeSessionFactory, - 'isMultiplexedEnabled', - ) as sinon.SinonStub - ).returns(true); }); it('should return any multiplexed session errors', done => { @@ -2432,92 +2191,6 @@ describe('Database', () => { done(); }); }); - - it('should throw an error if `begin` errors with `Session not found`', done => { - const fakeError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - - beginSnapshotStub.callsFake(callback => callback(fakeError)); - - database.getSnapshot((err, snapshot) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(snapshot, undefined); - done(); - }); - }); - - it('should release the session if `begin` errors', done => { - const fakeError = new Error('err'); - - beginSnapshotStub.callsFake(callback => callback(fakeError)); - - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.getSnapshot(err => { - assert.strictEqual(err, fakeError); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - - // since mux is default enabled, session pool is not getting created - it.skip('should retry if `begin` errors with `Session not found`', done => { - const fakeError = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - - const fakeSession2 = new FakeSession(); - const fakeSnapshot2 = new FakeTransaction( - {} as google.spanner.v1.TransactionOptions.ReadOnly, - ); - (sandbox.stub(fakeSnapshot2, 'begin') as sinon.SinonStub).callsFake( - callback => callback(null), - ); - sandbox.stub(fakeSession2, 'snapshot').returns(fakeSnapshot2); - - getSessionStub - .onFirstCall() - .callsFake(callback => callback(null, fakeSession)) - .onSecondCall() - .callsFake(callback => callback(null, fakeSession2)); - - beginSnapshotStub.callsFake(callback => callback(fakeError)); - - // The first session that was not found should be released back into the - // pool, so that the pool can remove it from its inventory. - const releaseStub = sandbox.stub(fakeSessionFactory, 'release'); - - database.getSnapshot((err, snapshot) => { - assert.ifError(err); - assert.strictEqual(snapshot, fakeSnapshot2); - // The first session that error should already have been released back - // to the pool. - assert.strictEqual(releaseStub.callCount, 1); - // Ending the valid snapshot will release its session back into the - // pool. 
- snapshot.emit('end'); - assert.strictEqual(releaseStub.callCount, 2); - done(); - }); - }); - - it('should release the snapshot on `end`', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.getSnapshot(err => { - assert.ifError(err); - fakeSnapshot.emit('end'); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); }); describe('getTransaction', () => { @@ -2535,10 +2208,7 @@ describe('Database', () => { ); getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, fakeSession, fakeTransaction); }); @@ -2570,36 +2240,6 @@ describe('Database', () => { done(); }); }); - - it('should propagate an error', done => { - const error = new Error('resource'); - (sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub) - .withArgs(fakeSession) - .throws(error); - - database.on('error', err => { - assert.deepStrictEqual(err, error); - done(); - }); - - database.getTransaction((err, transaction) => { - assert.ifError(err); - transaction.emit('end'); - }); - }); - - it('should release the session on transaction end', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.getTransaction((err, transaction) => { - assert.ifError(err); - transaction.emit('end'); - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); }); describe('getSessions', () => { @@ -2918,10 +2558,7 @@ describe('Database', () => { fakePartitionedDml = fakeSession.partitionedDml(); getSessionStub = ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForPartitionedOps', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, fakeSession); }); @@ -2986,14 +2623,9 @@ describe('Database', () => { beginStub.callsFake(callback => callback(fakeError)); - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - database.runPartitionedUpdate(QUERY, (err, rowCount) => { assert.strictEqual(err, fakeError); assert.strictEqual(rowCount, 0); - assert.strictEqual(releaseStub.callCount, 1); done(); }); }); @@ -3010,17 +2642,6 @@ describe('Database', () => { assert.ok(fakeCallback.calledOnce); }); - it('should release the session on transaction end', () => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(fakeSession); - - database.runPartitionedUpdate(QUERY, assert.ifError); - fakePartitionedDml.emit('end'); - - assert.strictEqual(releaseStub.callCount, 1); - }); - it('should accept requestOptions', () => { const fakeCallback = sandbox.spy(); @@ -3105,10 +2726,7 @@ describe('Database', () => { fakeSessionFactory = database.sessionFactory_; ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, SESSION, TRANSACTION); }); @@ -3117,8 +2735,8 @@ describe('Database', () => { it('should return any errors getting a session', done => { const fakeErr = new Error('err'); - (fakeSessionFactory.getSessionForReadWrite as sinon.SinonStub).callsFake( - callback => callback(fakeErr), + (fakeSessionFactory.getSession as sinon.SinonStub).callsFake(callback => + callback(fakeErr), 
); database.runTransaction(err => { @@ -3172,32 +2790,13 @@ describe('Database', () => { assert.strictEqual(options, fakeOptions); }); - it('should release the session when finished', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(SESSION); - - sandbox.stub(FakeTransactionRunner.prototype, 'run').resolves(); - - database.runTransaction(assert.ifError); - - setImmediate(() => { - assert.strictEqual(releaseStub.callCount, 1); - done(); - }); - }); - it('should catch any run errors and return them', done => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(SESSION); const fakeError = new Error('err'); sandbox.stub(FakeTransactionRunner.prototype, 'run').rejects(fakeError); database.runTransaction(err => { assert.strictEqual(err, fakeError); - assert.strictEqual(releaseStub.callCount, 1); done(); }); }); @@ -3214,10 +2813,7 @@ describe('Database', () => { beforeEach(() => { fakeSessionFactory = database.sessionFactory_; ( - sandbox.stub( - fakeSessionFactory, - 'getSessionForReadWrite', - ) as sinon.SinonStub + sandbox.stub(fakeSessionFactory, 'getSession') as sinon.SinonStub ).callsFake(callback => { callback(null, SESSION, TRANSACTION); }); @@ -3277,17 +2873,6 @@ describe('Database', () => { const value = await database.runTransactionAsync(assert.ifError); assert.strictEqual(value, fakeValue); }); - - it('should release the session when finished', async () => { - const releaseStub = ( - sandbox.stub(fakeSessionFactory, 'release') as sinon.SinonStub - ).withArgs(SESSION); - - sandbox.stub(FakeAsyncTransactionRunner.prototype, 'run').resolves(); - - await database.runTransactionAsync(assert.ifError); - assert.strictEqual(releaseStub.callCount, 1); - }); }); describe('session', () => { diff --git a/test/index.ts b/test/index.ts index 39f4dfdd2..7b85dfec3 100644 --- a/test/index.ts +++ b/test/index.ts @@ -51,12 +51,14 @@ assert.strictEqual(CLOUD_RESOURCE_HEADER, 'google-cloud-resource-prefix'); const apiConfig = require('../src/spanner_grpc_config.json'); async function disableMetrics(sandbox: sinon.SinonSandbox) { + process.env['SPANNER_DISABLE_BUILTIN_METRICS'] = 'false'; sandbox.stub(process.env, 'SPANNER_DISABLE_BUILTIN_METRICS').value('true'); await MetricsTracerFactory.resetInstance(); MetricsTracerFactory.enabled = false; } async function enableMetrics(sandbox: sinon.SinonSandbox) { + process.env['SPANNER_DISABLE_BUILTIN_METRICS'] = 'false'; sandbox.stub(process.env, 'SPANNER_DISABLE_BUILTIN_METRICS').value('false'); await MetricsTracerFactory.resetInstance(); } @@ -2201,11 +2203,6 @@ describe('Spanner', () => { replaceProjectIdTokenOverride = reqOpts => { return reqOpts; }; - const expectedGaxOpts = extend(true, {}, CONFIG.gaxOpts, { - otherArgs: { - headers: CONFIG.headers, - }, - }); FAKE_GAPIC_CLIENT[CONFIG.method] = function (reqOpts, gaxOpts, arg) { assert.strictEqual(this, FAKE_GAPIC_CLIENT); diff --git a/test/metrics/metrics.ts b/test/metrics/metrics.ts index 5344fdc58..dc6dd35bc 100644 --- a/test/metrics/metrics.ts +++ b/test/metrics/metrics.ts @@ -186,7 +186,6 @@ describe('Test metrics with mock server', () => { let gfeStub; let afeStub; let exporterStub; - const MIN_LATENCY = 0; const commonAttributes = { instance_id: 'instance', status: 'OK', diff --git a/test/mockserver/mockspanner.ts b/test/mockserver/mockspanner.ts index 57937c98e..a10967dab 100644 --- a/test/mockserver/mockspanner.ts +++ b/test/mockserver/mockspanner.ts @@ -31,7 +31,6 @@ import Any = 
google.protobuf.Any; import QueryMode = google.spanner.v1.ExecuteSqlRequest.QueryMode; import NullValue = google.protobuf.NullValue; import {ExecuteSqlRequest, ReadRequest} from '../../src/transaction'; -import {randomInt} from 'crypto'; const PROTO_PATH = 'spanner.proto'; const IMPORT_PATH = __dirname + '/../../../protos'; diff --git a/test/multiplexed-session.ts b/test/multiplexed-session.ts index d21f912f5..1b6175aff 100644 --- a/test/multiplexed-session.ts +++ b/test/multiplexed-session.ts @@ -22,7 +22,7 @@ import {Database} from '../src/database'; import {Session} from '../src/session'; import {MultiplexedSession} from '../src/multiplexed-session'; import {Transaction} from '../src/transaction'; -import {FakeTransaction} from './session-pool'; +import {FakeTransaction} from './session-factory'; import {grpc} from 'google-gax'; describe('MultiplexedSession', () => { diff --git a/test/session-factory.ts b/test/session-factory.ts index e61199ab2..d1d8ac92b 100644 --- a/test/session-factory.ts +++ b/test/session-factory.ts @@ -14,15 +14,19 @@ * limitations under the License. */ -import {Database, Session, SessionPool} from '../src'; +import {Database, Session} from '../src'; import {SessionFactory} from '../src/session-factory'; import * as sinon from 'sinon'; import * as assert from 'assert'; import {MultiplexedSession} from '../src/multiplexed-session'; -import {util} from '@google-cloud/common'; -import * as db from '../src/database'; -import {FakeTransaction} from './session-pool'; -import {ReleaseError} from '../src/session-pool'; + +export class FakeTransaction { + options; + constructor(options?) { + this.options = options; + } + async begin(): Promise<void> {} +} describe('SessionFactory', () => { let sessionFactory; @@ -30,7 +34,6 @@ describe('SessionFactory', () => { let fakeMuxSession; const sandbox = sinon.createSandbox(); const NAME = 'table-name'; - const POOL_OPTIONS = {}; function noop() {} const DATABASE = { createSession: noop, @@ -58,7 +61,6 @@ describe('SessionFactory', () => { const session = Object.assign(new Session(DATABASE, name), props, { create: sandbox.stub().resolves(), - transaction: sandbox.stub().returns(new FakeTransaction()), }); session.metadata = {multiplexed: false}; @@ -78,7 +80,7 @@ describe('SessionFactory', () => { .callsFake(() => { return Promise.resolve([fakeMuxSession]); }); - sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); + sessionFactory = new SessionFactory(DATABASE, NAME); sessionFactory.parent = DATABASE; }); @@ -87,326 +89,50 @@ describe('SessionFactory', () => { }); describe('instantiation', () => { - describe('when multiplexed session is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - it('should create a SessionPool object', () => { - assert(sessionFactory.pool_ instanceof SessionPool); - }); - - it('should accept a custom Pool class', () => { - function FakePool() {} - FakePool.prototype.on = util.noop; - FakePool.prototype.open = util.noop; - - const sessionFactory = new SessionFactory( - DATABASE, - NAME, - FakePool as {} as db.SessionPoolConstructor, - ); - assert(sessionFactory.pool_ instanceof FakePool); - }); - - it('should open the pool', () => { - const openStub = sandbox - .stub(SessionPool.prototype, 'open') - .callsFake(() => {}); - - new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - - assert.strictEqual(openStub.callCount, 1); - }); - - it('should correctly
initialize the isMultiplexedEnabled field when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexed, false); - }); - }); - - describe('when multiplexed session is default', () => { - it('should create a MultiplexedSession object', () => { - assert( - sessionFactory.multiplexedSession_ instanceof MultiplexedSession, - ); - }); - - it('should initiate the multiplexed session creation', () => { - const createSessionStub = sandbox - .stub(MultiplexedSession.prototype, 'createSession') - .callsFake(() => {}); - - new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - - assert.strictEqual(createSessionStub.callCount, 1); - }); - - it('should correctly initialize the isMultiplexedEnabled field when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is enabled', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexed, true); - }); + it('should create a MultiplexedSession object', () => { + assert(sessionFactory.multiplexedSession_ instanceof MultiplexedSession); }); - describe('when multiplexed session is disabled for r/w', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); + it('should initiate the multiplexed session creation', () => { + const createSessionStub = sandbox + .stub(MultiplexedSession.prototype, 'createSession') + .callsFake(() => {}); - it('should correctly initialize the isMultiplexedRW field', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexedRW, false); - }); - }); + new SessionFactory(DATABASE, NAME); - describe('when multiplexed session is default for r/w', () => { - it('should correctly initialize the isMultiplexedRW field', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexedRW, true); - }); + assert.strictEqual(createSessionStub.callCount, 1); }); }); describe('getSession', () => { - describe('when multiplexed session is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - it('should retrieve a regular session from the pool', done => { - ( - sandbox.stub(sessionFactory.pool_, 'getSession') as sinon.SinonStub - ).callsFake(callback => callback(null, fakeSession)); - sessionFactory.getSession((err, resp) => { - assert.strictEqual(err, null); - assert.strictEqual(resp, fakeSession); - done(); - }); - }); - - it('should propagate errors when regular session retrieval fails', done => { - const fakeError = new Error(); - ( - sandbox.stub(sessionFactory.pool_, 'getSession') as sinon.SinonStub - ).callsFake(callback => callback(fakeError, null)); - sessionFactory.getSession((err, resp) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(resp, null); - done(); - }); - }); - }); - - describe('when multiplexed session is default', () => { - it('should return the multiplexed session', done => { - ( - sandbox.stub( - sessionFactory.multiplexedSession_, - 'getSession', - ) as sinon.SinonStub - 
).callsFake(callback => callback(null, fakeMuxSession)); - sessionFactory.getSession((err, resp) => { - assert.strictEqual(err, null); - assert.strictEqual(resp, fakeMuxSession); - assert.strictEqual(resp?.metadata.multiplexed, true); - assert.strictEqual(fakeMuxSession.metadata.multiplexed, true); - done(); - }); - }); - - it('should propagate error when multiplexed session return fails', done => { - const fakeError = new Error(); - ( - sandbox.stub( - sessionFactory.multiplexedSession_, - 'getSession', - ) as sinon.SinonStub - ).callsFake(callback => callback(fakeError, null)); - sessionFactory.getSession((err, resp) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(resp, null); - done(); - }); - }); - }); - }); - - describe('getSessionForReadWrite', () => { - describe('when multiplexed session for r/w disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - it('should retrieve a regular session from the pool', done => { - ( - sandbox.stub(sessionFactory.pool_, 'getSession') as sinon.SinonStub - ).callsFake(callback => callback(null, fakeSession)); - sessionFactory.getSessionForReadWrite((err, resp) => { - assert.strictEqual(err, null); - assert.strictEqual(resp, fakeSession); - done(); - }); - }); - - it('should propagate errors when regular session retrieval fails', done => { - const fakeError = new Error(); - ( - sandbox.stub(sessionFactory.pool_, 'getSession') as sinon.SinonStub - ).callsFake(callback => callback(fakeError, null)); - sessionFactory.getSessionForReadWrite((err, resp) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(resp, null); - done(); - }); - }); - }); - - describe('when multiplexed session for r/w not disabled', () => { - it('should return the multiplexed session', done => { - ( - sandbox.stub( - sessionFactory.multiplexedSession_, - 'getSession', - ) as sinon.SinonStub - ).callsFake(callback => callback(null, fakeMuxSession)); - sessionFactory.getSessionForReadWrite((err, resp) => { - assert.strictEqual(err, null); - assert.strictEqual(resp, fakeMuxSession); - assert.strictEqual(resp?.metadata.multiplexed, true); - assert.strictEqual(fakeMuxSession.metadata.multiplexed, true); - done(); - }); - }); - - it('should propagate error when multiplexed session return fails', done => { - const fakeError = new Error(); - ( - sandbox.stub( - sessionFactory.multiplexedSession_, - 'getSession', - ) as sinon.SinonStub - ).callsFake(callback => callback(fakeError, null)); - sessionFactory.getSessionForReadWrite((err, resp) => { - assert.strictEqual(err, fakeError); - assert.strictEqual(resp, null); - done(); - }); - }); - }); - }); - - describe('getPool', () => { - it('should return the session pool object', () => { - const pool = sessionFactory.getPool(); - assert(pool instanceof SessionPool); - assert.deepStrictEqual(pool, sessionFactory.pool_); - }); - }); - - describe('release', () => { - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not disabled', () => { - it('should not call the release method', () => { - const releaseStub = sandbox.stub(sessionFactory.pool_, 'release'); - const fakeMuxSession = createMuxSession(); - sessionFactory.release(fakeMuxSession); - assert.strictEqual(releaseStub.callCount, 0); - }); - }); - - describe('when 
GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - it('should call the release method to release a regular session', () => { - const releaseStub = sandbox.stub(sessionFactory.pool_, 'release'); - const fakeSession = createSession(); - sessionFactory.release(fakeSession); - assert.strictEqual(releaseStub.callCount, 1); - }); - - it('should propagate an error when release fails', () => { - const fakeSession = createSession(); - try { - sessionFactory.release(fakeSession); - assert.fail('Expected error was not thrown'); - } catch (error) { - assert.strictEqual( - (error as ReleaseError).message, - 'Unable to release unknown resource.', - ); - assert.strictEqual((error as ReleaseError).resource, fakeSession); - } - }); - }); - }); - - describe('isMultiplexedEnabled', () => { - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not disabled', () => { - it('should have enabled the multiplexed', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexedEnabled(), true); - }); - }); - - describe('when GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - it('should not have enabled the multiplexed', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexedEnabled(), false); - }); - }); - }); - - describe('isMultiplexedEnabledForRW', () => { - describe('when multiplexed session is not disabled for read/write transactions', () => { - it('should have enabled the multiplexed', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexedEnabledForRW(), true); - }); - }); - - describe('when multiplexed session is disabled for read/write transactions', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - it('should not have enabled the multiplexed', () => { - const sessionFactory = new SessionFactory(DATABASE, NAME, POOL_OPTIONS); - assert.strictEqual(sessionFactory.isMultiplexedEnabledForRW(), false); + it('should return the multiplexed session', done => { + ( + sandbox.stub( + sessionFactory.multiplexedSession_, + 'getSession', + ) as sinon.SinonStub + ).callsFake(callback => callback(null, fakeMuxSession)); + sessionFactory.getSession((err, resp) => { + assert.strictEqual(err, null); + assert.strictEqual(resp, fakeMuxSession); + assert.strictEqual(resp?.metadata.multiplexed, true); + assert.strictEqual(fakeMuxSession.metadata.multiplexed, true); + done(); + }); + }); + + it('should propagate error when multiplexed session return fails', done => { + const fakeError = new Error(); + ( + sandbox.stub( + sessionFactory.multiplexedSession_, + 'getSession', + ) as sinon.SinonStub + ).callsFake(callback => callback(fakeError, null)); + sessionFactory.getSession((err, resp) => { + assert.strictEqual(err, fakeError); + 
assert.strictEqual(resp, null); + done(); }); }); }); diff --git a/test/session-pool.ts b/test/session-pool.ts deleted file mode 100644 index e64b91390..000000000 --- a/test/session-pool.ts +++ /dev/null @@ -1,1434 +0,0 @@ -/*! - * Copyright 2017 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import * as assert from 'assert'; -import {before, beforeEach, afterEach, describe, it} from 'mocha'; -import * as events from 'events'; -import * as extend from 'extend'; -import PQueue from 'p-queue'; -import * as proxyquire from 'proxyquire'; -import * as sinon from 'sinon'; -import stackTrace = require('stack-trace'); -import timeSpan = require('time-span'); - -import {Database} from '../src/database'; -import {Session} from '../src/session'; -import * as sp from '../src/session-pool'; -import {Transaction} from '../src/transaction'; -import {grpc} from 'google-gax'; -const {startTrace} = require('../src/instrument'); - -let pQueueOverride: typeof PQueue | null = null; - -function FakePQueue(options) { - return new (pQueueOverride || PQueue)(options); -} - -FakePQueue.default = FakePQueue; - -export class FakeTransaction { - options; - constructor(options?) { - this.options = options; - } - async begin(): Promise {} -} - -const fakeStackTrace = extend({}, stackTrace); - -function noop() {} - -describe('SessionPool', () => { - let sessionPool: sp.SessionPool; - // tslint:disable-next-line variable-name - let SessionPool: typeof sp.SessionPool; - let inventory; - - const DATABASE = { - batchCreateSessions: noop, - databaseRole: 'parent_role', - } as unknown as Database; - - const sandbox = sinon.createSandbox(); - const shouldNotBeCalled = sandbox.stub().throws('Should not be called.'); - - const createSession = (name = 'id', props?): Session => { - props = props || {}; - - return Object.assign(new Session(DATABASE, name), props, { - create: sandbox.stub().resolves(), - delete: sandbox.stub().resolves(), - keepAlive: sandbox.stub().resolves(), - transaction: sandbox.stub().returns(new FakeTransaction()), - }); - }; - - const createStackFrame = (): stackTrace.StackFrame => { - return { - getFunctionName: sandbox.stub().returns('myFunction'), - getMethodName: sandbox.stub().returns('MyClass.myMethod'), - getFileName: sandbox.stub().returns('path/to/file.js'), - getLineNumber: sandbox.stub().returns('99'), - getColumnNumber: sandbox.stub().returns('13'), - getTypeName: sandbox.stub().returns('type'), - isNative: sandbox.stub().returns(false), - isConstructor: sandbox.stub().returns(false), - }; - }; - - before(() => { - SessionPool = proxyquire('../src/session-pool.js', { - 'p-queue': FakePQueue, - 'stack-trace': fakeStackTrace, - }).SessionPool; - }); - - beforeEach(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - DATABASE.session = createSession; - sessionPool = new 
SessionPool(DATABASE); - inventory = sessionPool._inventory; - }); - - afterEach(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env - .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - pQueueOverride = null; - sandbox.restore(); - }); - - describe('formatTrace', () => { - let stackFrame: stackTrace.StackFrame; - let fakeTrace: stackTrace.StackFrame[]; - let file: string; - - beforeEach(() => { - stackFrame = createStackFrame(); - fakeTrace = [stackFrame]; - file = `${stackFrame.getFileName()}:${stackFrame.getLineNumber()}:${stackFrame.getColumnNumber()}`; - }); - - it('should return a trace with the method name', () => { - (stackFrame.getFunctionName as sinon.SinonStub).returns(undefined); - - const expected = `Session leak detected!\n at ${stackFrame.getMethodName()} (${file})`; - const actual = SessionPool.formatTrace(fakeTrace); - - assert.strictEqual(expected, actual); - }); - - it('should return a trace with the function name', () => { - (stackFrame.getMethodName as sinon.SinonStub).returns(undefined); - - const expected = `Session leak detected!\n at ${stackFrame.getFunctionName()} (${file})`; - const actual = SessionPool.formatTrace(fakeTrace); - - assert.strictEqual(expected, actual); - }); - }); - - describe('available', () => { - it('should return the number of available sessions', () => { - inventory.sessions = [createSession(), createSession(), createSession()]; - - assert.strictEqual(sessionPool.available, 3); - }); - }); - - describe('borrowed', () => { - beforeEach(() => { - inventory.borrowed = new Set([createSession(), createSession()]); - }); - - it('should return the number of borrowed sessions', () => { - assert.strictEqual(sessionPool.borrowed, 2); - }); - - it('should factor in any creation pending sessions', () => { - sessionPool._pending = 1; - assert.strictEqual(sessionPool.borrowed, 3); - }); - }); - - describe('isFull', () => { - it('should indicate if the pool is full', () => { - sessionPool.options.max = 1; - - assert.strictEqual(sessionPool.isFull, false); - inventory.borrowed = new Set([createSession()]); - assert.strictEqual(sessionPool.isFull, true); - }); - }); - - describe('size', () => { - it('should return the size of the pool', () => { - inventory.sessions = [createSession(), createSession(), createSession()]; - inventory.borrowed = new Set([createSession()]); - - assert.strictEqual(sessionPool.size, 4); - }); - }); - - describe('writes', () => { - beforeEach(() => { - inventory.sessions = [createSession(), createSession(), createSession()]; - }); - - it('should get the total number of read/write sessions', () => { - assert.strictEqual(sessionPool.size, 3); - }); - - it('should factor in borrowed sessions', () => { - const session = createSession('id', {}); - - inventory.borrowed.add(session); - - assert.strictEqual(sessionPool.size, 4); - assert.strictEqual(sessionPool.available, 3); - assert.strictEqual(sessionPool.borrowed, 1); - }); - }); - - describe('instantiation', () => { - it('should localize the database instance', () => { - assert.strictEqual(sessionPool.database, DATABASE); - }); - - describe('options', () => { - it('should apply defaults', () => { - assert.strictEqual(sessionPool.options.acquireTimeout, Infinity); - assert.strictEqual(sessionPool.options.concurrency, Infinity); - assert.strictEqual(sessionPool.options.fail, false); - assert.strictEqual(sessionPool.options.idlesAfter, 10); - 
assert.strictEqual(sessionPool.options.keepAlive, 30); - assert.deepStrictEqual(sessionPool.options.labels, {}); - assert.strictEqual(sessionPool.options.min, 25); - assert.strictEqual(sessionPool.options.max, 100); - assert.strictEqual(sessionPool.options.maxIdle, 1); - }); - - it('should not override user options', () => { - sessionPool = new SessionPool(DATABASE, {acquireTimeout: 0}); - assert.strictEqual(sessionPool.options.acquireTimeout, 0); - }); - - it('should override user options for databaseRole', () => { - sessionPool = new SessionPool(DATABASE, {databaseRole: 'child_role'}); - assert.strictEqual(sessionPool.options.databaseRole, 'child_role'); - }); - - it('should use default value of Database for databaseRole', () => { - sessionPool = new SessionPool(DATABASE); - assert.strictEqual(sessionPool.options.databaseRole, 'parent_role'); - }); - - describe('min and max', () => { - const minGtMax = /Min sessions may not be greater than max sessions\./; - - it('should not accept min>max', () => { - assert.throws(() => { - return new SessionPool(DATABASE, {min: 20, max: 10}); - }, minGtMax); - }); - }); - }); - - it('should set isOpen to false', () => { - assert.strictEqual(sessionPool.isOpen, false); - }); - - it('should create an inventory object', () => { - assert.deepStrictEqual(inventory, { - sessions: [], - borrowed: new Set(), - }); - }); - - it('should create a request queue', () => { - const poolOptions = { - concurrency: 11, - }; - - pQueueOverride = class { - constructor(options) { - return options; - } - } as typeof PQueue; - - sessionPool = new SessionPool(DATABASE, poolOptions); - assert.deepStrictEqual(sessionPool._requests, { - concurrency: poolOptions.concurrency, - }); - }); - - it('should create an acquire queue', () => { - pQueueOverride = class { - constructor(options) { - return options; - } - } as typeof PQueue; - - sessionPool = new SessionPool(DATABASE); - assert.deepStrictEqual(sessionPool._acquires, { - concurrency: 1, - }); - }); - - it('should create a map of traces', () => { - assert.deepStrictEqual(sessionPool._traces, new Map()); - }); - - it('should inherit from EventEmitter', () => { - assert(sessionPool instanceof events.EventEmitter); - }); - }); - - describe('close', () => { - beforeEach(() => { - inventory.sessions = [createSession(), createSession(), createSession()]; - inventory.borrowed = new Set([createSession(), createSession()]); - sessionPool._destroy = sandbox.stub().resolves(); - }); - - it('should clear the inventory', done => { - sessionPool.close(() => { - assert.strictEqual(sessionPool.size, 0); - done(); - }); - }); - - it('should stop housekeeping', done => { - sessionPool._stopHouseKeeping = done; - sessionPool.close(noop); - }); - - it('should set isOpen to false', () => { - sessionPool.isOpen = true; - sessionPool.close(noop); - - assert.strictEqual(sessionPool.isOpen, false); - }); - - it('should emit the close event', done => { - sessionPool.on('close', done); - sessionPool.close(noop); - }); - - it('should destroy all the sessions', done => { - const sessions = [...inventory.sessions, ...inventory.borrowed]; - - let destroyed = 0; - - sessionPool._destroy = async session => { - assert.strictEqual(session, sessions[destroyed++]); - }; - - sessionPool.close(err => { - assert.ifError(err); - assert.strictEqual(destroyed, sessions.length); - done(); - }); - }); - - it('should execute the callback on idle', done => { - const stub = sandbox.stub(sessionPool._requests, 'onIdle').resolves(); - - sessionPool.close(err => { - 
assert.ifError(err); - assert.strictEqual(stub.callCount, 1); - done(); - }); - }); - - it('should return a leak error', done => { - const fakeLeaks = ['a', 'b']; - - sandbox.stub(sessionPool, '_getLeaks').returns(fakeLeaks); - - sessionPool.close((err?: sp.SessionLeakError) => { - assert.strictEqual(err!.name, 'SessionLeakError'); - assert.strictEqual( - err!.message, - `${fakeLeaks.length} session leak(s) detected.`, - ); - assert.strictEqual(err!.messages, fakeLeaks); - done(); - }); - }); - }); - - describe('getSession', () => { - it('should acquire a session', done => { - const fakeSession = createSession(); - - sandbox.stub(sessionPool, '_acquire').resolves(fakeSession); - - sessionPool.getSession((err, session) => { - assert.ifError(err); - assert.strictEqual(session, fakeSession); - done(); - }); - }); - - it('should pass any errors to the callback', done => { - const error = new Error('err'); - - sandbox.stub(sessionPool, '_acquire').rejects(error); - - sessionPool.getSession(err => { - assert.strictEqual(err, error); - done(); - }); - }); - - it('should pass back the session and txn', done => { - const fakeTxn = new FakeTransaction() as unknown as Transaction; - const fakeSession = createSession(); - - fakeSession.txn = fakeTxn; - - sandbox.stub(sessionPool, '_acquire').resolves(fakeSession); - - sessionPool.getSession((err, session, txn) => { - assert.ifError(err); - assert.strictEqual(session, fakeSession); - assert.strictEqual(txn, fakeTxn); - done(); - }); - }); - }); - - describe('open', () => { - let fillStub: sinon.SinonStub<[], Promise>; - - beforeEach(() => { - sessionPool._stopHouseKeeping = sandbox.stub(); - fillStub = sandbox.stub(sessionPool, '_fill').resolves(); - }); - - it('should create an onclose promise', () => { - sessionPool.open(); - - assert(sessionPool._onClose instanceof Promise); - setImmediate(() => sessionPool.emit('close')); - return sessionPool._onClose; - }); - - it('should start housekeeping', done => { - sessionPool._startHouseKeeping = done; - sessionPool.open(); - }); - - it('should set isOpen to true', () => { - sessionPool.open(); - assert.strictEqual(sessionPool.isOpen, true); - }); - - it('should emit the open event', done => { - sessionPool.on('open', done); - sessionPool.open(); - }); - - it('should fill the pool', () => { - sessionPool.open(); - assert.strictEqual(fillStub.callCount, 1); - }); - - it('should not trigger unhandled promise rejection', () => { - const error = { - code: grpc.status.PERMISSION_DENIED, - message: 'spanner.sessions.create', - } as grpc.ServiceError; - - sandbox.restore(); - sandbox.stub(sessionPool, '_fill').rejects(error); - - const originalRejection = process.listeners('unhandledRejection').pop(); - if (originalRejection) { - process.removeListener('unhandledRejection', originalRejection!); - } - - process.once('unhandledRejection', err => { - assert.ifError(err); - }); - - sessionPool.open(); - - if (originalRejection) { - process.listeners('unhandledRejection').push(originalRejection!); - } - }); - - it('should not trigger unhandled promise rejection when default credentials not set', () => { - const error = { - message: 'Could not load the default credentials', - } as grpc.ServiceError; - - sandbox.restore(); - sandbox.stub(sessionPool, '_fill').rejects(error); - - const originalRejection = process.listeners('unhandledRejection').pop(); - if (originalRejection) { - process.removeListener('unhandledRejection', originalRejection!); - } - - process.once('unhandledRejection', err => { - assert.ifError(err); - 
}); - - sessionPool.open(); - - if (originalRejection) { - process.listeners('unhandledRejection').push(originalRejection!); - } - }); - - it('should not trigger unhandled promise rejection when projectId not set', () => { - const error = { - message: 'Unable to detect a Project Id in the current environment', - } as grpc.ServiceError; - - sandbox.restore(); - sandbox.stub(sessionPool, '_fill').rejects(error); - - const originalRejection = process.listeners('unhandledRejection').pop(); - if (originalRejection) { - process.removeListener('unhandledRejection', originalRejection!); - } - - process.once('unhandledRejection', err => { - assert.ifError(err); - }); - - sessionPool.open(); - - if (originalRejection) { - process.listeners('unhandledRejection').push(originalRejection!); - } - }); - }); - - describe('release', () => { - let prepStub: sinon.SinonStub<[Session], void>; - - beforeEach(() => { - prepStub = sandbox.stub(sessionPool, '_prepareTransaction').resolves(); - }); - - it('should throw an error when returning unknown resources', () => { - const badResource = createSession(); - - try { - sessionPool.release(badResource); - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual( - (e as sp.ReleaseError).message, - 'Unable to release unknown resource.', - ); - assert.strictEqual((e as sp.ReleaseError).resource, badResource); - } - }); - - it('should delete any old transactions', () => { - const session = createSession(); - - sessionPool._release = noop; - inventory.borrowed.add(session); - session.txn = {} as Transaction; - - sessionPool.release(session); - assert.strictEqual(session.txn, undefined); - }); - - it('should update the lastUsed timestamp', () => { - const session = createSession(); - - sessionPool._release = noop; - inventory.borrowed.add(session); - session.lastUsed = null!; - - sessionPool.release(session); - assert(isAround(session.lastUsed, Date.now())); - }); - - describe('read and write', () => { - let fakeSession; - - beforeEach(() => { - fakeSession = createSession('id'); - inventory.borrowed.add(fakeSession); - }); - - it('should release the read/write session', done => { - prepStub.resolves(); - sandbox - .stub(sessionPool, '_release') - .withArgs(fakeSession) - .callsFake(() => done()); - - sessionPool.release(fakeSession); - }); - }); - }); - - describe('_acquire', () => { - beforeEach(() => { - sessionPool.isOpen = true; - sessionPool._isValidSession = () => true; - }); - - it('should return a closed error if not open', async () => { - sessionPool.isOpen = false; - - try { - await sessionPool._acquire(); - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual( - (e as sp.ReleaseError).message, - 'Database is closed.', - ); - } - }); - - it('should return a timeout error if a timeout happens', async () => { - sessionPool.options.acquireTimeout = 1; - - sessionPool._acquires.add = fn => { - return new Promise(r => setTimeout(r, 3)).then(fn); - }; - - try { - await sessionPool._acquire(); - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual( - (e as sp.ReleaseError).message, - 'Timeout occurred while acquiring session.', - ); - } - }); - - it('should return a session', async () => { - const fakeSession = createSession(); - const now = Date.now(); - - const stub = sandbox - .stub(sessionPool, '_getSession') - .resolves(fakeSession); - const session = await sessionPool._acquire(); - const [startTime] = stub.getCall(0).args; - - assert(isAround(startTime, now)); - assert.strictEqual(session, fakeSession); - }); - - it('should drop expired sessions', 
async () => { - const badSession = createSession(); - const goodSession = createSession(); - - sessionPool._isValidSession = session => session === goodSession; - inventory.borrowed.add(badSession); - inventory.borrowed.add(goodSession); - - const stub = sandbox.stub(sessionPool, '_getSession'); - - stub.onFirstCall().resolves(badSession); - stub.onSecondCall().resolves(goodSession); - - const session = await sessionPool._acquire(); - - assert.strictEqual(session, goodSession); - assert.strictEqual(sessionPool.size, 1); - }); - - it('should capture the stack trace', async () => { - const id = 'abc'; - const fakeSession = createSession(); - const fakeTrace = []; - - fakeSession.id = id; - sandbox.stub(sessionPool, '_getSession').resolves(fakeSession); - sandbox.stub(fakeStackTrace, 'get').returns(fakeTrace); - - await sessionPool._acquire(); - - const trace = sessionPool._traces.get(id); - assert.strictEqual(trace, fakeTrace); - }); - - it('should convert read sessions to write sessions', async () => { - const fakeSession = createSession('id'); - - sandbox.stub(sessionPool, '_getSession').resolves(fakeSession); - const prepStub = sandbox - .stub(sessionPool, '_prepareTransaction') - .withArgs(fakeSession); - - const session = await sessionPool._acquire(); - - assert.strictEqual(session, fakeSession); - assert.strictEqual(prepStub.callCount, 1); - }); - }); - - describe('_borrow', () => { - it('should mark the session as borrowed', () => { - const fakeSession = createSession(); - - inventory.sessions.push(fakeSession); - - sessionPool._borrow(fakeSession); - - assert.strictEqual(inventory.sessions.indexOf(fakeSession), -1); - assert(inventory.borrowed.has(fakeSession)); - }); - }); - - describe('_borrowFrom', () => { - it('should borrow the last pushed session', () => { - const fakeSession1 = createSession(); - const fakeSession2 = createSession(); - - inventory.sessions.push(fakeSession1); - inventory.sessions.push(fakeSession2); - - let session = sessionPool._borrowFrom(); - assert.strictEqual(session, fakeSession2); - session = sessionPool._borrowFrom(); - assert.strictEqual(session, fakeSession1); - }); - }); - - describe('_borrowNextAvailableSession', () => { - it('should borrow when available', () => { - const fakeSession = createSession(); - - inventory.sessions.push(fakeSession); - sandbox.stub(sessionPool, '_borrowFrom').returns(fakeSession); - - const session = sessionPool._borrowNextAvailableSession(); - - assert.strictEqual(session, fakeSession); - }); - }); - - describe('_createSession', () => { - let stub: sinon.SinonStub<[number], Promise>; - - beforeEach(() => { - stub = sandbox.stub(sessionPool, '_createSessions').resolves(); - }); - - it('should create a single session', async () => { - await sessionPool._createSession(); - const [numbers] = stub.lastCall.args; - assert.deepStrictEqual(numbers, 1); - }); - }); - - describe('_createSessions', () => { - const OPTIONS = 3; - const RESPONSE = [[{}, {}, {}]]; - - let stub; - let releaseStub; - - beforeEach(() => { - stub = sandbox.stub(DATABASE, 'batchCreateSessions').resolves(RESPONSE); - releaseStub = sandbox.stub(sessionPool, 'release'); - }); - - it('should update the number of pending sessions', async () => { - await sessionPool._createSessions(OPTIONS); - assert.strictEqual(sessionPool.size, 3); - }); - - it('should create the appropriate number of sessions', async () => { - await sessionPool._createSessions(OPTIONS); - const [options] = stub.lastCall.args; - assert.strictEqual(options.count, 3); - }); - - it('should pass 
the session labels', async () => { - const labels = {foo: 'bar'}; - sessionPool.options.labels = labels; - await sessionPool._createSessions(OPTIONS); - const [options] = stub.lastCall.args; - assert.strictEqual(options.labels, labels); - }); - - it('should pass the session database role', async () => { - const databaseRole = 'child_role'; - sessionPool.options.databaseRole = databaseRole; - await sessionPool._createSessions(OPTIONS); - const [options] = stub.lastCall.args; - assert.strictEqual(options.databaseRole, databaseRole); - }); - - it('should make multiple requests if needed', async () => { - stub.onCall(0).resolves([[{}, {}]]); - stub.onCall(1).resolves([[{}]]); - - await sessionPool._createSessions(OPTIONS); - - assert.strictEqual(stub.callCount, 2); - assert.strictEqual(sessionPool.size, 3); - }); - - it('should reject with any request errors', async () => { - const error = new Error('err'); - stub.rejects(error); - - try { - await sessionPool._createSessions(OPTIONS); - throw new Error('Should not make it this far.'); - } catch (e) { - assert.strictEqual(e, error); - } - }); - - it('should add each session to the inventory', async () => { - await sessionPool._createSessions(OPTIONS); - assert.strictEqual(sessionPool.borrowed, 3); - - setImmediate(() => { - RESPONSE[0].forEach((fakeSession, i) => { - const [session] = releaseStub.getCall(i).args; - assert.strictEqual(session, fakeSession); - }); - }); - }); - - it('should prepare the correct number of write sessions', async () => { - await sessionPool._createSessions(OPTIONS); - - setImmediate(() => { - assert.strictEqual(sessionPool.size, OPTIONS); - }); - }); - }); - - describe('_destroy', () => { - it('should delete the session', async () => { - const fakeSession = createSession(); - const stub = fakeSession.delete as sinon.SinonStub; - - await sessionPool._destroy(fakeSession); - assert.strictEqual(stub.callCount, 1); - }); - - it('should emit any errors', done => { - const error = new Error('err'); - const fakeSession = createSession(); - const stub = fakeSession.delete as sinon.SinonStub; - - stub.rejects(error); - - sessionPool.on('error', err => { - assert.strictEqual(err, error); - done(); - }); - - void sessionPool._destroy(fakeSession); - }); - }); - - describe('_evictIdleSessions', () => { - let destroyStub: sinon.SinonStub<[Session], Promise>; - let fakeSessions; - - beforeEach(() => { - inventory.sessions = [ - createSession('id'), - createSession('id'), - createSession('id'), - ]; - - sessionPool.options.maxIdle = 0; - sessionPool.options.min = 0; - - fakeSessions = [...inventory.sessions]; - - sandbox - .stub(sessionPool, '_getIdleSessions') - .returns(fakeSessions.slice()); - - fakeSessions.reverse(); - destroyStub = sandbox.stub(sessionPool, '_destroy').resolves(); - }); - - it('should evict the sessions', () => { - sessionPool._evictIdleSessions(); - - assert.strictEqual(destroyStub.callCount, fakeSessions.length); - - fakeSessions.forEach((session, i) => { - const destroyed = destroyStub.getCall(i).args[0]; - assert.strictEqual(destroyed, session); - }); - }); - - it('should respect the maxIdle option', () => { - sessionPool.options.maxIdle = 2; - sessionPool._evictIdleSessions(); - - assert.strictEqual(destroyStub.callCount, 1); - const destroyed = destroyStub.getCall(0).args[0]; - assert.strictEqual(destroyed, fakeSessions[0]); - }); - - it('should respect the min value', () => { - sessionPool.options.min = 1; - sessionPool._evictIdleSessions(); - - assert.strictEqual(destroyStub.callCount, 2); - - 
fakeSessions.slice(0, 2).forEach((session, i) => { - const destroyed = destroyStub.getCall(i).args[0]; - assert.strictEqual(destroyed, session); - }); - }); - - it('should not evict if the session is not there', () => { - sandbox.restore(); - fakeSessions[1] = undefined; - sandbox - .stub(sessionPool, '_getIdleSessions') - .returns(fakeSessions.slice()); - destroyStub = sandbox.stub(sessionPool, '_destroy').resolves(); - - sessionPool._evictIdleSessions(); - - assert.strictEqual(destroyStub.callCount, fakeSessions.length - 1); - }); - }); - - describe('_fill', () => { - let stub: sinon.SinonStub<[number], Promise>; - - beforeEach(() => { - stub = sandbox.stub(sessionPool, '_createSessions'); - sessionPool.options.min = 8; - }); - - it('should create the min number of required sessions', async () => { - await sessionPool._fill(); - - const amount = stub.lastCall.args[0]; - - assert.strictEqual(amount, 8); - }); - - it('should respect the current size of the pool', async () => { - inventory.sessions = [createSession(), createSession(), createSession()]; - - await sessionPool._fill(); - - const amount = stub.lastCall.args[0]; - - assert.strictEqual(amount, 5); - }); - - it('should noop when no sessions are needed', async () => { - sessionPool.options.min = 0; - await sessionPool._fill(); - - assert.strictEqual(stub.callCount, 0); - }); - - it('should emit any request errors that occur', done => { - const error = new Error('err'); - - stub.rejects(error); - - sessionPool._fill().catch(err => { - assert.strictEqual(err, error); - done(); - }); - }); - }); - - describe('_getIdleSessions', () => { - it('should return a list of idle sessions', () => { - const idlesAfter = (sessionPool.options.idlesAfter = 1); // 1 minute - const idleTimestamp = Date.now() - idlesAfter * 60000; - - const fake = (inventory.sessions = [ - {lastUsed: Date.now()}, - {lastUsed: idleTimestamp}, - {lastUsed: idleTimestamp}, - ]); - - const expectedSessions = [fake[1], fake[2]]; - const idleSessions = sessionPool._getIdleSessions(); - - assert.deepStrictEqual(idleSessions, expectedSessions); - }); - }); - - describe('_getLeaks', () => { - it('should return an array of leaks', () => { - const trace1 = [createStackFrame()]; - const trace2 = [createStackFrame(), createStackFrame()]; - - const formatted1 = 'c'; - const formatted2 = 'd'; - - const stub = sandbox.stub(SessionPool, 'formatTrace'); - - stub.withArgs(trace1).returns(formatted1); - stub.withArgs(trace2).returns(formatted2); - - sessionPool._traces.set('a', trace1); - sessionPool._traces.set('b', trace2); - - const leaks = sessionPool._getLeaks(); - - assert.deepStrictEqual(leaks, [formatted1, formatted2]); - }); - }); - - describe('_getSession', () => { - let startTime: number; - - beforeEach(() => { - sessionPool._onClose = new Promise(resolve => { - sessionPool.on('close', resolve); - }); - sessionPool.options.max = 0; - startTime = Date.now(); - }); - - it('should return a session if one is available', async () => { - const fakeSession = createSession(); - - inventory.sessions = [fakeSession]; - - sandbox - .stub(sessionPool, '_borrowNextAvailableSession') - .returns(fakeSession); - - const session = await sessionPool._getSession(startTime); - assert.strictEqual(session, fakeSession); - }); - - it('should return an error if empty and fail = true', async () => { - sessionPool.options.fail = true; - - try { - await sessionPool._getSession(startTime); - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual( - (e as sp.ReleaseError).message, - 'No resources 
available.', - ); - } - }); - - it('should throw a closed error if the pool closes', async () => { - setTimeout(() => sessionPool.emit('close'), 100); - - try { - await sessionPool._getSession(startTime); - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual( - (e as sp.ReleaseError).message, - 'Database is closed.', - ); - } - }); - - it('should return a session when it becomes available', async () => { - const fakeSession = createSession(); - - sandbox - .stub(sessionPool, '_borrowNextAvailableSession') - .returns(fakeSession); - setTimeout(() => sessionPool.emit('session-available'), 100); - - const session = await sessionPool._getSession(startTime); - assert.strictEqual(session, fakeSession); - }); - - it('should use the acquireTimeout if set', async () => { - const end = timeSpan(); - const timeout = (sessionPool.options.acquireTimeout = 100); - - try { - await sessionPool._getSession(startTime); - shouldNotBeCalled(); - } catch (e) { - assert(isAround(timeout, end())); - assert.strictEqual( - (e as sp.ReleaseError).message, - 'Timeout occurred while acquiring session.', - ); - } - }); - - it('should create a session if the pool is not full', async () => { - const fakeSession = createSession(); - const stub = sandbox - .stub(sessionPool, '_createSessions') - .withArgs(1) - .callsFake(() => { - // this will fire off via _createSessions - setImmediate(() => sessionPool.emit('session-available')); - return Promise.resolve(); - }); - - sessionPool.options.max = 1; - sessionPool.options.incStep = 25; - sandbox - .stub(sessionPool, '_borrowNextAvailableSession') - .returns(fakeSession); - - const session = await sessionPool._getSession(startTime); - - assert.strictEqual(session, fakeSession); - assert.strictEqual(stub.callCount, 1); - }); - - it('should create enough sessions for the minimum configured to be reached', async () => { - const fakeSession = createSession(); - const stub = sandbox - .stub(sessionPool, '_createSessions') - .withArgs(20) - .callsFake(() => { - // this will fire off via _createSessions - setImmediate(() => sessionPool.emit('session-available')); - return Promise.resolve(); - }); - - sessionPool.options.min = 20; - sessionPool.options.max = 400; - sessionPool.options.incStep = 10; - sandbox - .stub(sessionPool, '_borrowNextAvailableSession') - .returns(fakeSession); - - const session = await sessionPool._getSession(startTime); - - assert.strictEqual(session, fakeSession); - assert.strictEqual(stub.callCount, 1); - }); - - it('should wait for a pending session to become available', async () => { - const fakeSession = createSession(); - - sessionPool.options.max = 2; - sessionPool._pending = 1; - const stub = sandbox.stub(sessionPool, '_createSession').callsFake(() => { - return Promise.reject(new Error('should not be called')); - }); - sandbox - .stub(sessionPool, '_borrowNextAvailableSession') - .returns(fakeSession); - setTimeout(() => sessionPool.emit('session-available'), 100); - - const session = await sessionPool._getSession(startTime); - assert.strictEqual(session, fakeSession); - assert.strictEqual(stub.callCount, 0); - }); - - it('should return any create errors', async () => { - const error = new Error('err'); - - sessionPool.options.max = 1; - sandbox.stub(sessionPool, '_createSessions').rejects(error); - - try { - await sessionPool._getSession(startTime); - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual(e, error); - } - }); - - it('should remove the available listener on error', async () => { - sessionPool.options.acquireTimeout = 100; - 
- const promise = sessionPool._getSession(startTime); - - assert.strictEqual(sessionPool.listenerCount('session-available'), 1); - - try { - await promise; - shouldNotBeCalled(); - } catch (e) { - assert.strictEqual(sessionPool.listenerCount('available'), 0); - } - }); - }); - - describe('_isValidSession', () => { - it('should return true if the session is good', () => { - const fakeSession = createSession('id', {lastUsed: Date.now()}); - const isValid = sessionPool._isValidSession(fakeSession); - - assert.strictEqual(isValid, true); - }); - - it('should return true if the session has gone bad', () => { - const fakeSession = createSession('id', { - lastUsed: Date.now() - 61 * 60000, - }); - const isValid = sessionPool._isValidSession(fakeSession); - - assert.strictEqual(isValid, false); - }); - }); - - describe('_ping', () => { - beforeEach(() => { - sandbox.stub(sessionPool, '_borrow'); - }); - - it('should borrow the session', async () => { - const fakeSession = createSession(); - const stub = sessionPool._borrow as sinon.SinonStub; - - stub.withArgs(fakeSession); - await sessionPool._ping(fakeSession); - - assert.strictEqual(stub.callCount, 1); - }); - - it('should discard it if expired', async () => { - const fakeSession = createSession(); - const keepAliveStub = fakeSession.keepAlive as sinon.SinonStub; - - inventory.borrowed.add(fakeSession); - sandbox.stub(sessionPool, '_isValidSession').returns(false); - await sessionPool._ping(fakeSession); - - const inPool = inventory.borrowed.has(fakeSession); - - assert.strictEqual(inPool, false); - assert.strictEqual(keepAliveStub.callCount, 0); - }); - - it('should keep alive the session then release it', async () => { - const fakeSession = createSession(); - const keepAliveStub = fakeSession.keepAlive as sinon.SinonStub; - - const releaseStub = sandbox - .stub(sessionPool, 'release') - .withArgs(fakeSession); - sandbox.stub(sessionPool, '_isValidSession').returns(true); - - await sessionPool._ping(fakeSession); - - assert.strictEqual(keepAliveStub.callCount, 1); - assert.strictEqual(releaseStub.callCount, 1); - }); - - it('should destroy the session if the ping fails', async () => { - const fakeSession = createSession(); - const keepAliveStub = fakeSession.keepAlive as sinon.SinonStub; - - keepAliveStub.rejects(); - sandbox.stub(sessionPool, '_isValidSession').returns(true); - - const destroyStub = sandbox - .stub(sessionPool, '_destroy') - .withArgs(fakeSession) - .resolves(); - - await sessionPool._ping(fakeSession); - - const inPool = inventory.borrowed.has(fakeSession); - - assert.strictEqual(inPool, false); - assert.strictEqual(destroyStub.callCount, 1); - }); - }); - - describe('_pingIdleSessions', () => { - it('should ping each idle session', async () => { - const fakeSessions = [createSession(), createSession(), createSession()]; - - const pingStub = sandbox.stub(sessionPool, '_ping').resolves(); - sandbox.stub(sessionPool, '_getIdleSessions').returns(fakeSessions); - sandbox.stub(sessionPool, '_fill').resolves(); - - await sessionPool._pingIdleSessions(); - - assert.strictEqual(pingStub.callCount, 3); - - fakeSessions.forEach((session, i) => { - const pinged = pingStub.getCall(i).args[0]; - assert.strictEqual(pinged, session); - }); - }); - - it('should fill the pool after pinging', async () => { - const fillStub = sandbox.stub(sessionPool, '_fill').resolves(); - - sandbox.stub(sessionPool, '_getIdleSessions').returns([]); - await sessionPool._pingIdleSessions(); - - assert.strictEqual(fillStub.callCount, 1); - }); - - it('should 
not throw error when database not found', async () => { - const fakeSessions = [createSession()]; - sandbox.stub(sessionPool, '_getIdleSessions').returns(fakeSessions); - - const error = { - code: grpc.status.NOT_FOUND, - message: 'Database not found', - } as grpc.ServiceError; - sandbox.stub(sessionPool, '_fill').rejects(error); - - try { - await sessionPool._pingIdleSessions(); - } catch (err) { - assert.ifError(err); - } - }); - }); - - describe('_release', () => { - it('should release the session', () => { - const fakeSession = createSession('id'); - - inventory.borrowed.add(fakeSession); - sessionPool._release(fakeSession); - - assert.strictEqual(inventory.borrowed.has(fakeSession), false); - assert.strictEqual(inventory.sessions.indexOf(fakeSession), 0); - }); - - it('should delete any stack traces', () => { - const id = 'abc'; - const fakeSession = createSession(id); - - sessionPool._traces.set(id, []); - sessionPool._release(fakeSession); - - assert.strictEqual(sessionPool._traces.has(id), false); - }); - }); - - describe('_startHouseKeeping', () => { - it('should set an interval to evict idle sessions', done => { - const expectedInterval = sessionPool.options.idlesAfter! * 60000; - const clock = sandbox.useFakeTimers(); - - sandbox.stub(sessionPool, '_evictIdleSessions').callsFake(done); - - sessionPool._startHouseKeeping(); - clock.tick(expectedInterval); - }); - - it('should set an interval to ping sessions', done => { - const expectedInterval = sessionPool.options.keepAlive! * 60000; - const clock = sandbox.useFakeTimers(); - - sandbox - .stub(sessionPool, '_pingIdleSessions') - .callsFake(async () => done()); - - sessionPool._startHouseKeeping(); - clock.tick(expectedInterval); - }); - }); - - describe('_stopHouseKeeping', () => { - it('should clear the intervals', () => { - sessionPool._pingHandle = setTimeout(noop, 1); - sessionPool._evictHandle = setTimeout(noop, 1); - - const fakeHandles = [sessionPool._pingHandle, sessionPool._evictHandle]; - const stub = sandbox.stub(global, 'clearInterval'); - - sessionPool._stopHouseKeeping(); - - fakeHandles.forEach((fakeHandle, i) => { - const [handle] = stub.getCall(i).args; - assert.strictEqual(handle, fakeHandle); - }); - }); - }); - - describe('trace annotations on active span', () => { - beforeEach(() => { - sessionPool.isOpen = true; - sessionPool._isValidSession = () => true; - }); - - it('annotations when acquiring a session', done => { - const topLevelSpanName = 'testSessionPool.acquire'; - startTrace(topLevelSpanName, {}, async span => { - const fakeSession = createSession(); - const now = Date.now(); - - const stub = sandbox - .stub(sessionPool, '_getSession') - .resolves(fakeSession); - const session = await sessionPool._acquire(); - const [startTime] = stub.getCall(0).args; - - assert(isAround(startTime, now)); - assert.strictEqual(session, fakeSession); - - await sessionPool._release(session); - span.end(); - - const events = span.events; - assert.strictEqual(!events, false, 'Events must be set'); - assert.strictEqual( - events.length > 0, - true, - 'Expecting at least 1 event', - ); - - // Sort the events by earliest time of occurence. 
- events.sort((evtA, evtB) => { - return evtA.time < evtB.time; - }); - - const gotEventNames: string[] = []; - events.forEach(event => { - gotEventNames.push(event.name); - }); - - const wantEventNames = ['Acquiring session', 'Acquired session']; - assert.deepEqual( - gotEventNames, - wantEventNames, - `Mismatched events\n\tGot: ${gotEventNames}\n\tWant: ${wantEventNames}`, - ); - - done(); - }); - }); - }); -}); - -function isAround(actual, expected) { - return actual > expected - 50 && actual < expected + 50; -} diff --git a/test/spanner.ts b/test/spanner.ts index fc14dff53..53ff76506 100644 --- a/test/spanner.ts +++ b/test/spanner.ts @@ -14,7 +14,7 @@ * limitations under the License. */ -import {after, before, beforeEach, describe, Done, it} from 'mocha'; +import {after, before, beforeEach, describe, it} from 'mocha'; import * as assert from 'assert'; import {grpc, Status, ServiceError} from 'google-gax'; // eslint-disable-next-line n/no-extraneous-import @@ -24,7 +24,6 @@ import { Instance, MutationGroup, MutationSet, - SessionPool, Snapshot, Spanner, Transaction, @@ -41,15 +40,10 @@ import {TEST_INSTANCE_NAME} from './mockserver/mockinstanceadmin'; import * as mockDatabaseAdmin from './mockserver/mockdatabaseadmin'; import * as sinon from 'sinon'; import {google} from '../protos/protos'; -import {ExecuteSqlRequest, ReadRequest, RunResponse} from '../src/transaction'; +import {ExecuteSqlRequest, RunResponse} from '../src/transaction'; import {Row} from '../src/partial-result-stream'; import {GetDatabaseOperationsOptions} from '../src/instance'; -import { - isSessionNotFoundError, - SessionLeakError, - SessionPoolExhaustedError, - SessionPoolOptions, -} from '../src/session-pool'; +import {SessionPoolOptions} from '../src/session-pool'; import {Float, Int, Json, Numeric, SpannerDate} from '../src/codec'; import * as stream from 'stream'; import * as util from 'util'; @@ -1193,7 +1187,6 @@ describe('Spanner with mock server', () => { const database = newTestDatabase({incStep: 1, min: 0}); try { const sessionFactory = database.sessionFactory_ as SessionFactory; - const pool = sessionFactory.pool_ as SessionPool; const multiplexedSession = sessionFactory.multiplexedSession_ as MultiplexedSession; const promises: Array> = []; @@ -1219,7 +1212,6 @@ describe('Spanner with mock server', () => { const database = newTestDatabase({incStep: 1, min: 0}); try { const sessionFactory = database.sessionFactory_ as SessionFactory; - const pool = sessionFactory.pool_ as SessionPool; const multiplexedSession = sessionFactory.multiplexedSession_ as MultiplexedSession; const promises: Array> = []; @@ -1990,12 +1982,9 @@ describe('Spanner with mock server', () => { sql: selectSql, } as ExecuteSqlRequest; const database = newTestDatabase(); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as SessionFactory) .multiplexedSession_ as MultiplexedSession; database.run(query, (err, resp) => { - assert.strictEqual(pool._inventory.borrowed.size, 0); // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); assert.ifError(err); @@ -2006,12 +1995,9 @@ describe('Spanner with mock server', () => { it('should execute the transaction(database.getSnapshot) successfully using multiplexed session', done => { const database = newTestDatabase(); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as 
SessionFactory) .multiplexedSession_ as MultiplexedSession; database.getSnapshot((err, resp) => { - assert.strictEqual(pool._inventory.borrowed.size, 0); // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); assert.ifError(err); @@ -2033,12 +2019,9 @@ describe('Spanner with mock server', () => { SingerId: 2, FirstName: 'Marc', }); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as SessionFactory) .multiplexedSession_ as MultiplexedSession; database.writeAtLeastOnce(mutations, (err, resp) => { - assert.strictEqual(pool._inventory.borrowed.size, 0); // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); assert.ifError(err); @@ -2078,113 +2061,6 @@ describe('Spanner with mock server', () => { ); } }); - - it('should fail the transaction, if query returns session not found error', done => { - const query = { - sql: selectSql, - } as ExecuteSqlRequest; - const error = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError(error), - ); - const database = newTestDatabase(); - database.run(query, (err, _) => { - assert.strictEqual(err!.code, error.code); - assert.strictEqual(err!.details, error.message); - done(); - }); - }); - }); - - describe('when multiplexed session is disabled for read-only', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - it('should make a request to BatchCreateSessions', async () => { - const database = newTestDatabase(); - await database.run('SELECT 1'); - const requests = spannerMock.getRequests().find(val => { - return (val as v1.BatchCreateSessionsRequest).sessionTemplate; - }) as v1.BatchCreateSessionsRequest; - assert.ok(requests, 'BatchCreateSessionsRequest should be called'); - assert.strictEqual( - requests.sessionTemplate?.multiplexed, - false, - 'Multiplexed should be false', - ); - }); - - it('should execute the transaction(database.run) successfully using regular session', done => { - const query = { - sql: selectSql, - } as ExecuteSqlRequest; - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.run(query, (err, resp) => { - assert.ifError(err); - assert.strictEqual(pool._inventory.sessions.length, 1); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - assert.strictEqual(resp.length, 3); - done(); - }); - }); - - it('should execute the transaction(database.getSnapshot) successfully using regular session', done => { - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.getSnapshot((err, resp) => { - assert.ifError(err); - assert.strictEqual(pool._inventory.borrowed.size, 1); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - 
assert(resp instanceof Snapshot); - resp.end(); - done(); - }); - }); - - it('should execute the transaction(database.writeAtLeastOnce) successfully using regular session', done => { - const database = newTestDatabase({min: 1, max: 1}); - const mutations = new MutationSet(); - mutations.upsert('Singers', { - SingerId: 1, - FirstName: 'Scarlet', - LastName: 'Terry', - }); - mutations.upsert('Singers', { - SingerId: 2, - FirstName: 'Marc', - }); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.writeAtLeastOnce(mutations, (err, resp) => { - assert.ifError(err); - assert.strictEqual(pool._inventory.borrowed.size, 1); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - assert.strictEqual(typeof resp?.commitTimestamp?.nanos, 'number'); - assert.strictEqual(typeof resp?.commitTimestamp?.seconds, 'string'); - assert.strictEqual(resp?.commitStats, null); - done(); - }); - }); }); }); @@ -2192,66 +2068,9 @@ describe('Spanner with mock server', () => { describe('default session mode for partitioned ops', () => { it('should execute the transaction(database.runPartitionedUpdate) successfully using multiplexed session', done => { const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { - assert.strictEqual(pool._inventory.borrowed.size, 0); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - assert.strictEqual(resp, 2); - assert.ifError(err); - done(); - }); - }); - }); - - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - it('should execute the transaction(database.runPartitionedUpdate) successfully using multiplexed session', done => { - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { - assert.strictEqual(pool._inventory.borrowed.size, 0); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - assert.strictEqual(resp, 2); - assert.ifError(err); - done(); - }); - }); - }); - - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; - }); - - after(() => { - delete process.env - .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; - }); - - it('should execute the transaction(database.runPartitionedUpdate) successfully using multiplexed session', done => { - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as 
SessionFactory) .multiplexedSession_ as MultiplexedSession; database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { - assert.strictEqual(pool._inventory.borrowed.size, 0); // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); assert.strictEqual(resp, 2); @@ -2260,40 +2079,6 @@ describe('Spanner with mock server', () => { }); }); }); - - describe('when multiplexed session is disabled for partitioned ops', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env - .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; - }); - - it('should execute the transaction(database.runPartitionedUpdate) successfully using regular/pool session', done => { - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.runPartitionedUpdate({sql: updateSql}, (err, resp) => { - assert.ifError(err); - assert.strictEqual(pool._inventory.sessions.length, 1); - assert.strictEqual( - pool._inventory.sessions[0].metadata.multiplexed, - false, - ); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - assert.strictEqual(resp, 2); - done(); - }); - }); - }); }); describe('batch write', () => { @@ -2305,125 +2090,6 @@ describe('Spanner with mock server', () => { Name: 'One', }); const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.commonHeaders_ = { - 'x-goog-spanner-request-id': `1.${randIdForProcess}.1.1.5.1`, - }; - database - .batchWriteAtLeastOnce([mutationGroup]) - .on('error', done) - .on('data', response => { - // ensure that response is coming - assert.notEqual(response.commitTimestamp, null); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - - // regular session will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not disabled - assert.strictEqual(pool._inventory.sessions.length, 0); - }) - .on('end', done); - }); - }); - - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - - it('should use multiplexed session', done => { - const mutationGroup = new MutationGroup(); - mutationGroup.upsert('FOO', { - Id: '1', - Name: 'One', - }); - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.commonHeaders_ = { - 'x-goog-spanner-request-id': `1.${randIdForProcess}.1.1.5.1`, - }; - database - .batchWriteAtLeastOnce([mutationGroup]) - .on('error', done) - .on('data', response => { - // ensure that response is coming - 
assert.notEqual(response.commitTimestamp, null); - // multiplexed session will get created by default during client initialization - assert.notEqual(multiplexedSession._multiplexedSession, null); - // session pool will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not false - assert.strictEqual(pool._inventory.sessions.length, 0); - }) - .on('end', done); - }); - }); - - describe('when only GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - it('should use multiplexed session', done => { - const mutationGroup = new MutationGroup(); - mutationGroup.upsert('FOO', { - Id: '1', - Name: 'One', - }); - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; - const multiplexedSession = (database.sessionFactory_ as SessionFactory) - .multiplexedSession_ as MultiplexedSession; - database.commonHeaders_ = { - 'x-goog-spanner-request-id': `1.${randIdForProcess}.1.1.5.1`, - }; - database - .batchWriteAtLeastOnce([mutationGroup]) - .on('error', done) - .on('data', response => { - // ensure that response is not null - assert.notEqual(response.commitTimestamp, null); - // multiplexed session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - // session pool will not get created since GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS is not false - assert.strictEqual(pool._inventory.sessions.length, 0); - }) - .on('end', done); - }); - }); - - describe('when multiplexed session is disabled for r/w', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - it('should use regular session', done => { - const mutationGroup = new MutationGroup(); - mutationGroup.upsert('FOO', { - Id: '1', - Name: 'One', - }); - const database = newTestDatabase({min: 1, max: 1}); - const pool = (database.sessionFactory_ as SessionFactory) - .pool_ as SessionPool; const multiplexedSession = (database.sessionFactory_ as SessionFactory) .multiplexedSession_ as MultiplexedSession; database.commonHeaders_ = { @@ -2435,11 +2101,6 @@ describe('Spanner with mock server', () => { .on('data', response => { // ensure that response is coming assert.notEqual(response.commitTimestamp, null); - assert.strictEqual( - Array.from(pool._inventory.borrowed)[0].metadata.multiplexed, - false, - ); - assert.strictEqual(pool._inventory.borrowed.size, 1); // multiplexed session will get created by default assert.notEqual(multiplexedSession._multiplexedSession, null); }) @@ -2665,1117 +2326,168 @@ describe('Spanner with mock server', () => { optimizerVersion: 'version-in-db-opts', optimizerStatisticsPackage: 'stats-package-in-db-opts', }); - try { - const [snapshot] = await database.getSnapshot(); - await snapshot.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await snapshot.end(); - } finally { - await database.close(); - } - }); - - it('transaction.run', done => { - const database = newTestDatabase(); - database.runTransaction(async (err, transaction) => { - assert.ifError(err); - await 
transaction!.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await transaction!.commit(); - await database.close(); - done(); - }); - }); - - it('transaction.run with query-options', done => { - const database = newTestDatabase(); - database.runTransaction(async (err, transaction) => { - assert.ifError(err); - await transaction!.run({ - sql: selectSql, - queryOptions: { - optimizerVersion: 'version-on-query', - optimizerStatisticsPackage: 'stats-package-on-query', - }, - }); - verifyQueryOptions('version-on-query', 'stats-package-on-query'); - await transaction!.commit(); - await database.close(); - done(); - }); - }); - - it('transaction.run with database-with-query-options', done => { - const database = newTestDatabase(undefined, { - optimizerVersion: 'version-in-db-opts', - optimizerStatisticsPackage: 'stats-package-in-db-opts', - }); - database.runTransaction(async (err, transaction) => { - assert.ifError(err); - await transaction!.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await transaction!.commit(); - await database.close(); - done(); - }); - }); - - it('async transaction.run', async () => { - const database = newTestDatabase(); - try { - await database.runTransactionAsync(async transaction => { - await transaction.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await transaction.commit(); - }); - } finally { - await database.close(); - } - }); - - it('async transaction.run with query-options', async () => { - const database = newTestDatabase(); - try { - await database.runTransactionAsync(async transaction => { - await transaction.run({ - sql: selectSql, - queryOptions: { - optimizerVersion: 'version-on-query', - optimizerStatisticsPackage: 'stats-package-on-query', - }, - }); - verifyQueryOptions('version-on-query', 'stats-package-on-query'); - await transaction.commit(); - }); - } finally { - await database.close(); - } - }); - - it('async transaction.run with database-with-query-options', async () => { - const database = newTestDatabase(undefined, { - optimizerVersion: 'version-in-db-opts', - optimizerStatisticsPackage: 'stats-package-in-db-opts', - }); - try { - await database.runTransactionAsync(async transaction => { - await transaction.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await transaction.commit(); - }); - } finally { - await database.close(); - } - }); - }); - - describe('on database options', () => { - const OPTIMIZER_VERSION = '40'; - const OPTIMIZER_STATISTICS_PACKAGE = 'auto_20191128_14_47_22UTC'; - - // Request a database with default query options. 
- function newTestDatabase(options?: SessionPoolOptions): Database { - return instance.database(`database-${dbCounter++}`, options, { - optimizerVersion: OPTIMIZER_VERSION, - optimizerStatisticsPackage: OPTIMIZER_STATISTICS_PACKAGE, - } as IQueryOptions); - } - - it('database.run', async () => { - const database = newTestDatabase(); - try { - await database.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - } finally { - await database.close(); - } - }); - - it('snapshot.run', async () => { - const database = newTestDatabase(); - try { - const [snapshot] = await database.getSnapshot(); - await snapshot.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await snapshot.end(); - } finally { - await database.close(); - } - }); - - it('transaction.run', done => { - const database = newTestDatabase(); - database.runTransaction(async (err, transaction) => { - assert.ifError(err); - await transaction!.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await transaction!.commit(); - await database.close(); - done(); - }); - }); - - it('async transaction.run', async () => { - const database = newTestDatabase(); - try { - await database.runTransactionAsync(async transaction => { - await transaction.run(selectSql); - verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); - await transaction.commit(); - }); - } finally { - await database.close(); - } - }); - }); - }); - - describe('session-not-found', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env - .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - it('should retry "Session not found" errors on Database.run()', done => { - const db = newTestDatabase({ - incStep: 1, - min: 0, - }); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - db.run(selectSql, (err, rows) => { - if (err) { - assert.fail(err); - } - assert.strictEqual(rows!.length, 3); - db.getSessions((err, results) => { - if (err) { - assert.fail(err); - } - // The mock server should have exactly 3 sessions. - // Two from session pool where, the first one was - // removed from the session pool because of the simulated - // 'Session not found' error. The second one was created by the retry. - // As we only simulate the 'Session not found' error, the first - // session is still present on the mock server. - // one session will be multiplexed session. 
- assert.strictEqual(results!.length, 3); - if (results!.length !== 3) { - done(); - } - db.close() - .then(() => done()) - .catch(err => assert.fail(err)); - }); - }); - }); - - it('should retry "Session not found" errors for Database.runStream()', () => { - const db = newTestDatabase(); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - let rowCount = 0; - db.runStream(selectSql) - .on('data', () => rowCount++) - .on('error', err => { - assert.fail(err); - }) - .on('end', () => { - assert.strictEqual(rowCount, 3); - }); - }); - - it('should retry multiple "Session not found" errors on Database.run()', done => { - const db = newTestDatabase(); - for (let i = 0; i < 10; i++) { - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - } - db.run(selectSql, (err, rows) => { - if (err) { - assert.fail(err); - } - assert.strictEqual(rows!.length, 3); - done(); - }); - }); - - it('should not retry "Session not found" errors halfway a stream', done => { - const db = newTestDatabase(); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - streamIndex: 1, - } as MockError), - ); - db.run(selectSql, err => { - if (err) { - assert.ok(isSessionNotFoundError(err)); - done(); - return; - } - assert.fail('Missing expected "Session not found" error'); - }); - }); - - it('should retry "Session not found" errors for Database.getSnapshot() with callbacks', done => { - const db = newTestDatabase(); - const sessionNotFound = { - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError; - // The beginTransaction call will fail 3 times with 'Session not found' - // before succeeding. - spannerMock.setExecutionTime( - spannerMock.beginTransaction, - SimulatedExecutionTime.ofErrors([ - sessionNotFound, - sessionNotFound, - sessionNotFound, - ]), - ); - db.getSnapshot((err, snapshot) => { - assert.ifError(err); - snapshot!.run(selectSql, (err, rows) => { - assert.ifError(err); - assert.strictEqual(rows.length, 3); - snapshot!.end(); - db.close(done); - }); - }); - }); - - it('should retry "Session not found" errors for a query on a session on Database.runTransaction()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. - pool.once('available', () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - runTransactionWithExpectedSessionRetry(db, done); - }); - }); - - function runTransactionWithExpectedSessionRetry(db: Database, done: Done) { - db.runTransaction((err, transaction) => { - assert.ifError(err); - transaction!.run(selectSql, (err, rows) => { - assert.ifError(err); - assert.strictEqual(rows.length, 3); - // Verify that the server has two sessions: The first one was marked - // as 'not found' by the client because of the mocked error, and a - // second one that was created as a result of the retry. 
- db.getSessions((err, sessions) => { - assert.ifError(err); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - transaction!.commit(err => { - assert.ifError(err); - db.close(done); - }); - }); - }); - }); - } - - it('should retry "Session not found" errors for Commit on a session on Database.runTransaction()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. - pool.once('available', () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.commit, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - db.runTransaction((err, transaction) => { - assert.ifError(err); - transaction!.insert('FOO', {Id: 1, Name: 'foo'}); - transaction!.commit(err => { - assert.ifError(err); - db.getSessions((err, sessions) => { - assert.ifError(err); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - db.close(done); - }); - }); - }); - }); - }); - - it('should retry "Session not found" errors for Database.getSnapshot()', done => { - const db = newTestDatabase(); - spannerMock.setExecutionTime( - spannerMock.beginTransaction, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - db.getSnapshot() - .then(response => { - const [snapshot] = response; - snapshot - .run(selectSql) - .then(response => { - const [rows] = response; - assert.strictEqual(rows.length, 3); - snapshot.end(); - db.close(done); - }) - .catch(done); - }) - .catch(done); - }); - - it('should retry "Session not found" errors for runUpdate on a session on Database.runTransaction()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. - pool.once('available', () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - db.runTransaction((err, transaction) => { - assert.ifError(err); - transaction!.runUpdate(insertSql, (err, updateCount) => { - assert.ifError(err); - assert.strictEqual(updateCount, 1); - transaction!.commit(err => { - assert.ifError(err); - db.getSessions((err, sessions) => { - assert.ifError(err); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - db.close(done); - }); - }); - }); - }); - }); - }); - - it('should retry "Session not found" errors for executeBatchDml on a session on Database.runTransaction()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. 
- pool.once('available', () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.executeBatchDml, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - db.runTransaction((err, transaction) => { - assert.ifError(err); - transaction!.batchUpdate( - [insertSql, insertSql], - (err, updateCounts) => { - assert.ifError(err); - assert.deepStrictEqual(updateCounts, [1, 1]); - transaction!.commit(err => { - assert.ifError(err); - db.getSessions((err, sessions) => { - assert.ifError(err); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - db.close(done); - }); - }); - }, - ); - }); - }); - }); - - it('should retry "Session not found" errors for a query on a session on Database.runTransactionAsync()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. - pool.once('available', () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - runAsyncTransactionWithExpectedSessionRetry(db).then(done).catch(done); - }); - }); - - async function runAsyncTransactionWithExpectedSessionRetry(db: Database) { - try { - await db.runTransactionAsync(async (transaction): Promise => { - try { - const [rows] = await transaction.run(selectSql); - assert.strictEqual(rows.length, 3); - const [sessions] = await db.getSessions(); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - await transaction.commit(); - return Promise.resolve(); - } catch (e) { - return Promise.reject(e); - } - }); - await db.close(); - } catch (e) { - assert.fail(e as ServiceError); - } - } - - it('should retry "Session not found" errors for Commit on a session on Database.runTransactionAsync()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. - pool.once('available', async () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.commit, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - try { - await db - .runTransactionAsync(async (transaction): Promise => { - transaction.insert('FOO', {Id: 1, Name: 'foo'}); - await transaction.commit(); - const [sessions] = await db.getSessions(); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - }) - .catch(assert.ifError); - await db.close(); - } catch (e) { - done(e); - return; - } - done(); - }); - }); - - it('should retry "Session not found" errors for runUpdate on a session on Database.runTransactionAsync()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. 
- pool.once('available', async () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.executeStreamingSql, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - try { - await db - .runTransactionAsync(async (transaction): Promise => { - const [updateCount] = await transaction.runUpdate(insertSql); - assert.strictEqual(updateCount, 1); - await transaction.commit(); - const [sessions] = await db.getSessions(); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - }) - .catch(assert.ifError); - await db.close(); - } catch (e) { - done(e); - return; - } - done(); - }); - }); - - it('should retry "Session not found" errors for executeBatchDml on a session on Database.runTransactionAsync()', done => { - const db = newTestDatabase({min: 1, incStep: 1}); - const pool = db.pool_ as SessionPool; - // Wait until one session with a transaction has been created. - pool.once('available', async () => { - assert.strictEqual(pool.size, 1); - spannerMock.setExecutionTime( - spannerMock.executeBatchDml, - SimulatedExecutionTime.ofError({ - code: grpc.status.NOT_FOUND, - message: 'Session not found', - } as MockError), - ); - try { - await db - .runTransactionAsync(async (transaction): Promise => { - const [updateCounts] = await transaction.batchUpdate([ - insertSql, - insertSql, - ]); - assert.deepStrictEqual(updateCounts, [1, 1]); - await transaction.commit(); - const [sessions] = await db.getSessions(); - // sessions length is 3 as the list will contain default multiplexed session as well. - assert.strictEqual(sessions!.length, 3); - }) - .catch(assert.ifError); - await db.close(); - } catch (e) { - done(e); - return; - } - done(); - }); - }); - }); - - describe('session-pool', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS = - 'false'; - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW = 'false'; - }); - - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - delete process.env - .GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_PARTITIONED_OPS; - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW; - }); - - it('should execute table mutations without leaking sessions', async () => { - const database = newTestDatabase(); - try { - await database.table('foo').upsert({id: 1, name: 'bar'}); - } finally { - await database.close(); - } - }); - - it('should throw an error with a stacktrace when leaking a session', async () => { - await testLeakSession(); - }); - - async function testLeakSession() { - // The query to execute - const query = { - sql: selectSql, - }; - const db = newTestDatabase(); - await db - .getSnapshot({strong: true, returnReadTimestamp: true}) - .then(([tx]) => { - return tx.run(query); - }) - .then(([rows]) => { - // Assert that we get all results from the server. - assert.strictEqual(rows.length, 3); - // Note that we do not call transaction.end(). This will cause a session leak. 
- }) - .catch(reason => { - assert.fail(reason); - }); - await db - .close() - .then(() => { - assert.fail('Missing expected SessionLeakError'); - }) - .catch((reason: SessionLeakError) => { - assert.strictEqual(reason.name, 'SessionLeakError', reason); - assert.strictEqual(reason.messages.length, 1); - assert.ok(reason.messages[0].indexOf('testLeakSession') > -1); - }); - } - - it('should reuse sessions', async () => { - const database = newTestDatabase({incStep: 1, min: 0}); - try { - await verifyReadSessionReuse(database); - } finally { - await database.close(); - } - }); - - it('should reuse sessions when fail=true', async () => { - const db = newTestDatabase({ - min: 0, - max: 10, - incStep: 1, - concurrency: 5, - fail: true, - }); - try { - await verifyReadSessionReuse(db); - } finally { - await db.close(); - } - }); - - async function verifyReadSessionReuse(database: Database) { - // The query to execute - const query = { - sql: selectSql, - }; - const pool = database.pool_ as SessionPool; - let sessionId = ''; - for (let i = 0; i < 10; i++) { - const [rows] = await database.run(query); - assert.strictEqual(rows.length, 3); - rows.forEach(() => {}); - assert.strictEqual(pool.size, 1); - if (i > 0) { - assert.strictEqual(pool._inventory.sessions[0].id, sessionId); - } - sessionId = pool._inventory.sessions[0].id; - } - } - - it('should throw SessionPoolExhaustedError with stacktraces when pool is exhausted', async () => { - await testSessionPoolExhaustedError(); - }); - - async function testSessionPoolExhaustedError() { - const database = newTestDatabase({ - incStep: 1, - min: 0, - max: 1, - fail: true, - }); - try { - const [tx1] = await database.getSnapshot(); - try { - await database.getSnapshot(); - assert.fail('missing expected exception'); - } catch (e) { - assert.strictEqual( - (e as ServiceError).name, - SessionPoolExhaustedError.name, - ); - const exhausted = e as SessionPoolExhaustedError; - assert.ok(exhausted.messages); - assert.strictEqual(exhausted.messages.length, 1); - assert.ok( - exhausted.messages[0].indexOf('testSessionPoolExhaustedError') > -1, - ); - } - tx1.end(); - } finally { - await database.close(); - } - } - - it('should reuse sessions after executing invalid sql', async () => { - // The query to execute - const requestIDRegex = new RegExp(`1.${randIdForProcess}.1.1.\\d+.1`); - const query = { - sql: invalidSql, - }; - const database = newTestDatabase({incStep: 1, min: 0}); - try { - const pool = database.pool_ as SessionPool; - for (let i = 0; i < 10; i++) { - try { - const [rows] = await database.run(query); - assert.fail(`missing expected exception, got ${rows.length} rows`); - } catch (e) { - assert.strictEqual( - (e as ServiceError).message, - `${grpc.status.NOT_FOUND} NOT_FOUND: ${fooNotFoundErr.message}`, - ); - assert.deepStrictEqual( - (e as RequestIDError).requestID.match(requestIDRegex) !== null, - true, - ); - } - } - assert.strictEqual(pool.size, 1); - } finally { - await database.close(); - } - }); - - it('should reuse sessions after executing streaming sql', async () => { - // The query to execute - const query = { - sql: selectSql, - }; - const database = newTestDatabase({incStep: 1, min: 0}); - try { - const pool = database.pool_ as SessionPool; - for (let i = 0; i < 10; i++) { - const rowCount = await getRowCountFromStreamingSql(database, query); - assert.strictEqual(rowCount, 3); - } - assert.strictEqual(pool.size, 1); - } finally { - await database.close(); - } - }); - - it('should reuse sessions after executing an invalid streaming 
sql', async () => { - // The query to execute - const requestIDRegex = new RegExp(`1.${randIdForProcess}.1.1.\\d+.1`); - const query = { - sql: invalidSql, - }; - const database = newTestDatabase({incStep: 1, min: 0}); - try { - const pool = database.pool_ as SessionPool; - for (let i = 0; i < 10; i++) { - try { - const rowCount = await getRowCountFromStreamingSql(database, query); - assert.fail(`missing expected exception, got ${rowCount}`); - } catch (e) { - assert.strictEqual( - (e as ServiceError).message, - `${grpc.status.NOT_FOUND} NOT_FOUND: ${fooNotFoundErr.message}`, - ); - assert.deepStrictEqual( - (e as RequestIDError).requestID.match(requestIDRegex) !== null, - true, - ); - } - } - assert.strictEqual(pool.size, 1); - } finally { - await database.close(); - } - }); - - it('should reuse write sessions', async () => { - const database = newTestDatabase({incStep: 1, min: 0}); - try { - await verifyWriteSessionReuse(database); - } finally { - await database.close(); - } - }); - - it('should reuse write sessions when fail=true', async () => { - const db = newTestDatabase({ - min: 0, - max: 10, - incStep: 1, - concurrency: 5, - fail: true, + try { + const [snapshot] = await database.getSnapshot(); + await snapshot.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await snapshot.end(); + } finally { + await database.close(); + } }); - try { - await verifyWriteSessionReuse(db); - } finally { - await db.close(); - } - }); - async function verifyWriteSessionReuse(database: Database) { - const update = { - sql: insertSql, - }; - const pool = database.pool_ as SessionPool; - for (let i = 0; i < 10; i++) { - await executeSimpleUpdate(database, update); - // The pool should not contain more sessions than the number of transactions that we have executed. - // The exact number depends on the time needed to prepare new transactions, as checking in a read/write - // transaction to the pool will cause the session to be prepared with a read/write transaction before it is added - // to the list of available sessions. - assert.ok(pool.size <= i + 1); - } - } + it('transaction.run', done => { + const database = newTestDatabase(); + database.runTransaction(async (err, transaction) => { + assert.ifError(err); + await transaction!.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await transaction!.commit(); + await database.close(); + done(); + }); + }); - it('should re-use write session as read-session', async () => { - const database = newTestDatabase({incStep: 1, max: 1}); - const pool = database.pool_ as SessionPool; - try { - // Execute a simple read/write transaction to create 1 write session. 
- const w = executeSimpleUpdate(database, updateSql); - const r = database.run(selectSql); - await Promise.all([w, r]); - assert.strictEqual(pool.size, 1); - } finally { - await database.close(); - } - }); + it('transaction.run with query-options', done => { + const database = newTestDatabase(); + database.runTransaction(async (err, transaction) => { + assert.ifError(err); + await transaction!.run({ + sql: selectSql, + queryOptions: { + optimizerVersion: 'version-on-query', + optimizerStatisticsPackage: 'stats-package-on-query', + }, + }); + verifyQueryOptions('version-on-query', 'stats-package-on-query'); + await transaction!.commit(); + await database.close(); + done(); + }); + }); - it('should fail on session pool exhaustion and fail=true', async () => { - const database = newTestDatabase({ - max: 1, - incStep: 1, - fail: true, + it('transaction.run with database-with-query-options', done => { + const database = newTestDatabase(undefined, { + optimizerVersion: 'version-in-db-opts', + optimizerStatisticsPackage: 'stats-package-in-db-opts', + }); + database.runTransaction(async (err, transaction) => { + assert.ifError(err); + await transaction!.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await transaction!.commit(); + await database.close(); + done(); + }); }); - let tx1; - try { + + it('async transaction.run', async () => { + const database = newTestDatabase(); try { - [tx1] = await database.getSnapshot(); - await database.getSnapshot(); - assert.fail('missing expected exception'); - } catch (e) { - assert.strictEqual( - (e as ServiceError).message, - 'No resources available.', - ); - } - } finally { - if (tx1) { - tx1.end(); + await database.runTransactionAsync(async transaction => { + await transaction.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await transaction.commit(); + }); + } finally { + await database.close(); } - await database.close(); - } - }); - - it('should pre-fill session pool', async () => { - const database = newTestDatabase({ - min: 100, - max: 200, }); - const pool = database.pool_ as SessionPool; - const expectedAmount = pool.options.min!; - assert.strictEqual(pool.size, expectedAmount); - // Wait until all sessions have been created and prepared. - const started = new Date().getTime(); - while ( - pool._inventory.sessions.length < expectedAmount && - new Date().getTime() - started < 1000 - ) { - await sleep(1); - } - await database.close(); - }); - it('should use pre-filled session pool', async () => { - const database = newTestDatabase({ - min: 100, - max: 200, + it('async transaction.run with query-options', async () => { + const database = newTestDatabase(); + try { + await database.runTransactionAsync(async transaction => { + await transaction.run({ + sql: selectSql, + queryOptions: { + optimizerVersion: 'version-on-query', + optimizerStatisticsPackage: 'stats-package-on-query', + }, + }); + verifyQueryOptions('version-on-query', 'stats-package-on-query'); + await transaction.commit(); + }); + } finally { + await database.close(); + } }); - const pool = database.pool_ as SessionPool; - const expectedAmount = pool.options.min!; - // Start executing a query. This query should use one of the sessions that - // has been pre-filled into the pool. - const [rows] = await database.run(selectSql); - assert.strictEqual(rows.length, 3); - // Wait until all sessions have been created and prepared. 
- const started = new Date().getTime(); - while ( - pool._inventory.sessions.length < expectedAmount && - new Date().getTime() - started < 1000 - ) { - await sleep(1); - } - assert.strictEqual(pool.size, expectedAmount); - assert.strictEqual(pool._inventory.sessions.length, expectedAmount); - await database.close(); - }); - - it('should propagate database not found errors', async () => { - spannerMock.setExecutionTime( - spannerMock.batchCreateSessions, - // Two errors; one for the initial _fill of the session pool, and one - // for the query. - SimulatedExecutionTime.ofErrors([ - { - code: Status.NOT_FOUND, - message: 'Database not found', - }, - { - code: Status.NOT_FOUND, - message: 'Database not found', - }, - ] as MockError[]), - ); - const database = newTestDatabase(); - try { - await database.run(selectSql); - assert.fail('missing expected error'); - } catch (err) { - assert.strictEqual((err as ServiceError).code, Status.NOT_FOUND); - } finally { - await database.close(); - } - }); - it('should not propagate instance and database not found errors for SessionPoolOptions.min > 0', async () => { - for (const msg of ['Instance not found', 'Database not found']) { - spannerMock.setExecutionTime( - spannerMock.batchCreateSessions, - SimulatedExecutionTime.ofErrors([ - { - code: Status.NOT_FOUND, - message: msg, - }, - ] as MockError[]), - ); + it('async transaction.run with database-with-query-options', async () => { + const database = newTestDatabase(undefined, { + optimizerVersion: 'version-in-db-opts', + optimizerStatisticsPackage: 'stats-package-in-db-opts', + }); try { - const database = newTestDatabase({ - incStep: 1, - min: 25, - max: 400, + await database.runTransactionAsync(async transaction => { + await transaction.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await transaction.commit(); }); - const response = await database.create(); - assert.ok(response); - const [rows] = await database.run(selectSql); - assert.strictEqual(rows.length, 3); - // Make sure the pool of the newly created database is filled. - const pool = database.pool_ as SessionPool; - assert.strictEqual(pool.size, 25); + } finally { await database.close(); - } catch (err) { - assert.fail(err as ServiceError); } - } + }); }); - it('should propagate permission denied errors on initialization', async () => { - spannerMock.setExecutionTime( - spannerMock.batchCreateSessions, - SimulatedExecutionTime.ofErrors([ - { - code: Status.PERMISSION_DENIED, - message: 'Needs permission', - }, - { - code: Status.PERMISSION_DENIED, - message: 'Needs permission', - }, - ] as MockError[]), - ); - const database = newTestDatabase().on('error', err => { - assert.strictEqual(err.code, Status.PERMISSION_DENIED); - }); - try { - await database.run(selectSql); - assert.fail('missing expected error'); - } catch (err) { - assert.strictEqual( - (err as ServiceError).code, - Status.PERMISSION_DENIED, - ); - } finally { - await database.close(); + describe('on database options', () => { + const OPTIMIZER_VERSION = '40'; + const OPTIMIZER_STATISTICS_PACKAGE = 'auto_20191128_14_47_22UTC'; + + // Request a database with default query options. 
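+ // The newTestDatabase helper below passes these constants to
+ // instance.database() as the default IQueryOptions for the database, so the
+ // tests in this block verify that they are sent with each statement.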
+ function newTestDatabase(options?: SessionPoolOptions): Database { + return instance.database(`database-${dbCounter++}`, options, { + optimizerVersion: OPTIMIZER_VERSION, + optimizerStatisticsPackage: OPTIMIZER_STATISTICS_PACKAGE, + } as IQueryOptions); } - }); - it('should create new session when numWaiters >= pending', async () => { - const database = newTestDatabase({ - min: 1, - max: 10, - incStep: 1, + it('database.run', async () => { + const database = newTestDatabase(); + try { + await database.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + } finally { + await database.close(); + } }); - const pool = database.pool_ as SessionPool; - // Start executing a query. This query should use the one session that is - // being pre-filled into the pool. - const promise1 = database.run(selectSql); - // Start executing another query. This query should initiate the creation - // of a new session. - const promise2 = database.run(selectSql); - const rows = await Promise.all([promise1, promise2]); - assert.strictEqual(pool.size, 2); - assert.strictEqual(rows[0][0].length, 3); - assert.strictEqual(rows[1][0].length, 3); - await database.close(); - }); - it('should respect options.incStep', async () => { - const database = newTestDatabase({ - min: 100, - max: 400, - incStep: 25, + it('snapshot.run', async () => { + const database = newTestDatabase(); + try { + const [snapshot] = await database.getSnapshot(); + await snapshot.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await snapshot.end(); + } finally { + await database.close(); + } }); - const pool = database.pool_ as SessionPool; - assert.strictEqual(pool.size, pool.options.min); - // Request options.min + 1 sessions. - const snapshots: Snapshot[] = []; - for (let i = 0; i < pool.options.min! + 1; i++) { - const [snapshot] = await database.getSnapshot(); - snapshots.unshift(snapshot); - } - // The pool should create a batch of sessions. - assert.strictEqual(pool.size, pool.options.min! 
+ pool.options.incStep!); - for (const s of snapshots) { - s.end(); - } - await database.close(); - }); - it('should respect options.max', async () => { - const database = newTestDatabase({ - min: 0, - max: 3, - incStep: 2, + it('transaction.run', done => { + const database = newTestDatabase(); + database.runTransaction(async (err, transaction) => { + assert.ifError(err); + await transaction!.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await transaction!.commit(); + await database.close(); + done(); + }); }); - const pool = database.pool_ as SessionPool; - const [tx1] = await database.getSnapshot(); - assert.strictEqual(pool.size, pool.options.incStep); - const [tx2] = await database.getSnapshot(); - const [tx3] = await database.getSnapshot(); - assert.strictEqual(pool.size, pool.options.max); - tx1.end(); - tx2.end(); - tx3.end(); - await database.close(); - }); - it('should respect options.max when a write session is requested', async () => { - const database = newTestDatabase({ - min: 0, - max: 3, - incStep: 2, - }); - const pool = database.pool_ as SessionPool; - const [tx1] = await database.getSnapshot(); - const [tx2] = await database.getSnapshot(); - assert.strictEqual(pool.size, pool.options.incStep); - await database.runTransactionAsync(async tx => { - if (!tx) { - assert.fail('Transaction failed'); + it('async transaction.run', async () => { + const database = newTestDatabase(); + try { + await database.runTransactionAsync(async transaction => { + await transaction.run(selectSql); + verifyQueryOptions(OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE); + await transaction.commit(); + }); + } finally { + await database.close(); } - await tx.runUpdate(updateSql); - await tx.commit(); }); - assert.strictEqual(pool.size, pool.options.max); - tx1.end(); - tx2.end(); - await database.close(); }); }); @@ -4203,52 +2915,15 @@ describe('Spanner with mock server', () => { }); describe('batch-readonly-transaction', () => { - describe('when multiplexed session is disabled', () => { - before(() => { - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - }); - after(() => { - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - }); - it('should use session from pool', async () => { - const database = newTestDatabase({min: 0, incStep: 1}); - const pool = database.pool_ as SessionPool; - assert.strictEqual(pool.size, 0); - const [transaction] = await database.createBatchTransaction(); - assert.strictEqual(pool.size, 1); - assert.strictEqual(pool.available, 0); - transaction.close(); - await database.close(); - }); - it('failing to close transaction should cause session leak error', async () => { - const database = newTestDatabase(); - await database.createBatchTransaction(); - try { - await database.close(); - assert.fail('missing expected session leak error'); - } catch (err) { - assert.ok(err instanceof SessionLeakError); - } - }); - }); - - describe('when session mode is default', () => { - it('should use multiplexed session', async () => { - const database = newTestDatabase({min: 0, incStep: 1}); - const pool = database.pool_ as SessionPool; - const multiplexedSession = ( - database.sessionFactory_ as SessionFactory - ).multiplexedSession_ as MultiplexedSession; - // pool is empty before call to createBatchTransaction - assert.strictEqual(pool.size, 0); - const [transaction] = await database.createBatchTransaction(); - // pool is empty after call to createBatchTransaction - assert.strictEqual(pool.size, 0); - // multiplexed 
session will get created by default - assert.notEqual(multiplexedSession._multiplexedSession, null); - transaction.close(); - await database.close(); - }); + it('should use multiplexed session', async () => { + const database = newTestDatabase({min: 0, incStep: 1}); + const multiplexedSession = (database.sessionFactory_ as SessionFactory) + .multiplexedSession_ as MultiplexedSession; + const [transaction] = await database.createBatchTransaction(); + // multiplexed session will get created by default + assert.notEqual(multiplexedSession._multiplexedSession, null); + transaction.close(); + await database.close(); }); }); @@ -7059,11 +5734,6 @@ describe('Spanner with mock server', () => { it('should list databases', async () => { const [databases] = await instance.getDatabases(); assert.strictEqual(databases.length, 2); - // Assert that listing the databases does not cause a session pool to be - // initialized for the databases. - for (const db of databases) { - assert.strictEqual((db.pool_ as SessionPool).size, 0); - } }); it('should create a database', async () => { @@ -7181,20 +5851,6 @@ describe('Spanner with mock server', () => { }); }); - describe('session-factory', () => { - it('should not propagate any error when disabling GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS after client initialization', done => { - const database = newTestDatabase(); - // disable env after database creation - process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS = 'false'; - const sessionFactory = database.sessionFactory_ as SessionFactory; - sessionFactory.getSession((err, _) => { - assert.ifError(err); - delete process.env.GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS; - done(); - }); - }); - }); - describe('XGoogRequestId', () => { const exporter = new InMemorySpanExporter(); const provider = new NodeTracerProvider({ @@ -7367,7 +6023,3 @@ function getRowCountFromStreamingSql( }); }); } - -function sleep(ms): Promise { - return new Promise(resolve => setTimeout(resolve, ms)); -}