From 0727f474b74014e5334e1194aa47c8c294805591 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Wed, 14 Jan 2026 16:00:52 +0100 Subject: [PATCH 1/9] add pos-cli exec liquid command --- bin/pos-cli-exec-liquid.js | 33 ++++++++++ bin/pos-cli-exec.js | 8 +++ bin/pos-cli.js | 1 + test/exec-liquid.test.js | 120 +++++++++++++++++++++++++++++++++++++ 4 files changed, 162 insertions(+) create mode 100644 bin/pos-cli-exec-liquid.js create mode 100644 bin/pos-cli-exec.js create mode 100644 test/exec-liquid.test.js diff --git a/bin/pos-cli-exec-liquid.js b/bin/pos-cli-exec-liquid.js new file mode 100644 index 00000000..ae7fb3ca --- /dev/null +++ b/bin/pos-cli-exec-liquid.js @@ -0,0 +1,33 @@ +#!/usr/bin/env node + +const { program } = require('commander'); +const Gateway = require('../lib/proxy'); +const fetchAuthData = require('../lib/settings').fetchSettings; +const logger = require('../lib/logger'); + +program + .name('pos-cli exec liquid') + .argument('', 'name of environment. Example: staging') + .argument('', 'liquid code to execute as string') + .action(async (environment, code) => { + const authData = fetchAuthData(environment, program); + const gateway = new Gateway(authData); + + try { + const response = await gateway.liquid({ content: code }); + + if (response.error) { + logger.Error(`Liquid execution error: ${response.error}`); + process.exit(1); + } + + if (response.result) { + logger.Print(response.result); + } + } catch (error) { + logger.Error(`Failed to execute liquid: ${error.message}`); + process.exit(1); + } + }); + +program.parse(process.argv); \ No newline at end of file diff --git a/bin/pos-cli-exec.js b/bin/pos-cli-exec.js new file mode 100644 index 00000000..ac16ba86 --- /dev/null +++ b/bin/pos-cli-exec.js @@ -0,0 +1,8 @@ +#!/usr/bin/env node + +const { program } = require('commander'); + +program + .name('pos-cli exec') + .command('liquid ', 'execute liquid code on instance') + .parse(process.argv); \ No newline at end of file diff --git a/bin/pos-cli.js b/bin/pos-cli.js index 78d4d467..1463d9b9 100755 --- a/bin/pos-cli.js +++ b/bin/pos-cli.js @@ -24,6 +24,7 @@ program .command('data', 'export, import or clean data on instance') .command('deploy ', 'deploy code to environment').alias('d') .command('env', 'manage environments') + .command('exec', 'execute code on instance') .command('gui', 'gui for content editor, graphql, logs') .command('generate', 'generates files') .command('init', 'initialize directory structure') diff --git a/test/exec-liquid.test.js b/test/exec-liquid.test.js new file mode 100644 index 00000000..09144c39 --- /dev/null +++ b/test/exec-liquid.test.js @@ -0,0 +1,120 @@ +jest.mock('../lib/apiRequest', () => ({ + apiRequest: jest.fn() +})); + +const Gateway = require('../lib/proxy'); + +describe('Gateway liquid method', () => { + const { apiRequest } = require('../lib/apiRequest'); + + test('calls apiRequest with correct parameters', async () => { + apiRequest.mockResolvedValue({ result: 'HELLO WORLD', error: null }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.liquid({ content: "{{ 'hello world' | upcase }}" }); + + expect(apiRequest).toHaveBeenCalledWith({ + method: 'POST', + uri: 'http://example.com/api/app_builder/liquid_exec', + json: { content: "{{ 'hello world' | upcase }}" }, + forever: true, + request: expect.any(Function) + }); + expect(result).toEqual({ result: 'HELLO WORLD', error: null }); + }); + + test('handles liquid execution error', async 
() => { + apiRequest.mockResolvedValue({ result: null, error: 'Liquid syntax error' }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.liquid({ content: "{{ 'hello world' | invalid_filter }}" }); + + expect(result).toEqual({ result: null, error: 'Liquid syntax error' }); + }); +}); + +describe('exec liquid CLI', () => { + const exec = require('./utils/exec'); + const cliPath = require('./utils/cliPath'); + + const env = Object.assign(process.env, { + CI: true, + MPKIT_URL: 'http://example.com', + MPKIT_TOKEN: '1234', + MPKIT_EMAIL: 'foo@example.com' + }); + + test('requires code argument', async () => { + const { code, stderr } = await exec(`${cliPath} exec liquid staging`, { env }); + + expect(code).toEqual(1); + expect(stderr).toMatch("error: missing required argument 'code'"); + }); +}); + +// Integration test - requires real platformOS instance +describe('exec liquid integration', () => { + const exec = require('./utils/exec'); + const cliPath = require('./utils/cliPath'); + + // Only run if real credentials are available + const hasRealCredentials = process.env.MPKIT_URL && + process.env.MPKIT_TOKEN && + !process.env.MPKIT_URL.includes('example.com'); + + (hasRealCredentials ? test : test.skip)('executes liquid code on real instance', async () => { + const { stdout, stderr, code } = await exec(`${cliPath} exec liquid dev "{{ 'hello' | upcase }}"`, { + env: process.env, + timeout: 30000 + }); + + expect(code).toEqual(0); + expect(stdout).toMatch('HELLO'); + expect(stderr).toBe(''); + }, 30000); + + (hasRealCredentials ? test : test.skip)('handles liquid syntax error on real instance', async () => { + const { stdout, stderr, code } = await exec(`${cliPath} exec liquid dev "{{ 'hello' | invalid_filter }}"`, { + env: process.env, + timeout: 30000 + }); + + expect(code).toEqual(1); + expect(stderr).toMatch('Liquid execution error'); + }, 30000); + + (hasRealCredentials ? test : test.skip)('executes {{ \'now\' | to_time }} and returns current time', async () => { + const beforeTime = new Date(); + const { stdout, stderr, code } = await exec(`${cliPath} exec liquid dev "{{ 'now' | to_time }}"`, { + env: process.env, + timeout: 30000 + }); + const afterTime = new Date(); + + expect(code).toEqual(0); + expect(stderr).toBe(''); + + // Parse the returned time - liquid to_time returns ISO format like "2023-01-01 12:00:00 +0000" + const returnedTimeStr = stdout.trim(); + const returnedTime = new Date(returnedTimeStr); + + // Check that the returned time is within 1 second of the current time + const timeDiff = Math.abs(returnedTime.getTime() - beforeTime.getTime()); + expect(timeDiff).toBeLessThanOrEqual(1000); // 1 second in milliseconds + + // Also check it's not in the future beyond our test window + const futureDiff = afterTime.getTime() - returnedTime.getTime(); + expect(futureDiff).toBeGreaterThanOrEqual(0); + expect(futureDiff).toBeLessThanOrEqual(1000); + }, 30000); + + (hasRealCredentials ? 
test : test.skip)('handles unknown tag error', async () => { + const { stdout, stderr, code } = await exec(`${cliPath} exec liquid dev "{% hello %}"`, { + env: process.env, + timeout: 30000 + }); + + expect(code).toEqual(1); + expect(stderr).toMatch('Liquid execution error: Liquid syntax error: Unknown tag \'hello\''); + }, 30000); +}); From 7bc508876c9da6a0437fb0367d4e10b97f3d2ec7 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Wed, 14 Jan 2026 16:12:24 +0100 Subject: [PATCH 2/9] add pos-cli exec graphql command --- bin/pos-cli-exec-graphql.js | 33 +++++++++++ bin/pos-cli-exec.js | 1 + test/exec-graphql.test.js | 109 ++++++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+) create mode 100644 bin/pos-cli-exec-graphql.js create mode 100644 test/exec-graphql.test.js diff --git a/bin/pos-cli-exec-graphql.js b/bin/pos-cli-exec-graphql.js new file mode 100644 index 00000000..5f0e548e --- /dev/null +++ b/bin/pos-cli-exec-graphql.js @@ -0,0 +1,33 @@ +#!/usr/bin/env node + +const { program } = require('commander'); +const Gateway = require('../lib/proxy'); +const fetchAuthData = require('../lib/settings').fetchSettings; +const logger = require('../lib/logger'); + +program + .name('pos-cli exec graphql') + .argument('', 'name of environment. Example: staging') + .argument('', 'graphql query to execute as string') + .action(async (environment, graphql) => { + const authData = fetchAuthData(environment, program); + const gateway = new Gateway(authData); + + try { + const response = await gateway.graph({ query: graphql }); + + if (response.errors) { + logger.Error(`GraphQL execution error: ${JSON.stringify(response.errors, null, 2)}`); + process.exit(1); + } + + if (response.data) { + logger.Print(JSON.stringify(response, null, 2)); + } + } catch (error) { + logger.Error(`Failed to execute graphql: ${error.message}`); + process.exit(1); + } + }); + +program.parse(process.argv); \ No newline at end of file diff --git a/bin/pos-cli-exec.js b/bin/pos-cli-exec.js index ac16ba86..e4df94fb 100644 --- a/bin/pos-cli-exec.js +++ b/bin/pos-cli-exec.js @@ -5,4 +5,5 @@ const { program } = require('commander'); program .name('pos-cli exec') .command('liquid ', 'execute liquid code on instance') + .command('graphql ', 'execute graphql query on instance') .parse(process.argv); \ No newline at end of file diff --git a/test/exec-graphql.test.js b/test/exec-graphql.test.js new file mode 100644 index 00000000..ac922abb --- /dev/null +++ b/test/exec-graphql.test.js @@ -0,0 +1,109 @@ +jest.mock('../lib/apiRequest', () => ({ + apiRequest: jest.fn() +})); + +const Gateway = require('../lib/proxy'); + +describe('Gateway graph method', () => { + const { apiRequest } = require('../lib/apiRequest'); + + test('calls apiRequest with correct parameters', async () => { + const mockResponse = { + "data": { + "records": { + "results": [] + } + } + }; + apiRequest.mockResolvedValue(mockResponse); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const query = '{ records(per_page: 20) { results { id } } }'; + const result = await gateway.graph({ query }); + + expect(apiRequest).toHaveBeenCalledWith({ + method: 'POST', + uri: 'http://example.com/api/graph', + json: { query }, + forever: true, + request: expect.any(Function) + }); + expect(result).toEqual(mockResponse); + }); + + test('handles graphql execution error', async () => { + const mockErrorResponse = { + errors: [ + { + message: 'Syntax Error: Expected Name, found ', + locations: [{ line: 1, 
column: 40 }] + } + ] + }; + apiRequest.mockResolvedValue(mockErrorResponse); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const query = '{ records(per_page: 20) { results { id } '; // Missing closing brace + const result = await gateway.graph({ query }); + + expect(result).toEqual(mockErrorResponse); + }); +}); + +describe('exec graphql CLI', () => { + const exec = require('./utils/exec'); + const cliPath = require('./utils/cliPath'); + + const env = Object.assign(process.env, { + CI: true, + MPKIT_URL: 'http://example.com', + MPKIT_TOKEN: '1234', + MPKIT_EMAIL: 'foo@example.com' + }); + + test('requires graphql argument', async () => { + const { code, stderr } = await exec(`${cliPath} exec graphql staging`, { env }); + + expect(code).toEqual(1); + expect(stderr).toMatch("error: missing required argument 'graphql'"); + }); +}); + +// Integration test - requires real platformOS instance +describe('exec graphql integration', () => { + const exec = require('./utils/exec'); + const cliPath = require('./utils/cliPath'); + + // Only run if real credentials are available + const hasRealCredentials = process.env.MPKIT_URL && + process.env.MPKIT_TOKEN && + !process.env.MPKIT_URL.includes('example.com'); + + (hasRealCredentials ? test : test.skip)('executes graphql query on real instance', async () => { + const query = '{ records(per_page: 20) { results { id } } }'; + const { stdout, stderr, code } = await exec(`${cliPath} exec graphql dev "${query}"`, { + env: process.env, + timeout: 30000 + }); + + expect(code).toEqual(0); + expect(stderr).toBe(''); + + // Parse JSON response + const response = JSON.parse(stdout); + expect(response).toHaveProperty('data'); + expect(response.data).toHaveProperty('records'); + expect(Array.isArray(response.data.records.results)).toBe(true); + }, 30000); + + (hasRealCredentials ? 
test : test.skip)('handles graphql syntax error on real instance', async () => { + const invalidQuery = '{ records(per_page: 20) { results { id } '; // Missing closing brace + const { stdout, stderr, code } = await exec(`${cliPath} exec graphql dev "${invalidQuery}"`, { + env: process.env, + timeout: 30000 + }); + + expect(code).toEqual(1); + expect(stderr).toMatch('GraphQL execution error'); + }, 30000); +}); \ No newline at end of file From 672cd003fba745d6eb75a8f50830a70161edda12 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Wed, 14 Jan 2026 16:19:26 +0100 Subject: [PATCH 3/9] extra confirmation for production --- bin/pos-cli-exec-graphql.js | 28 ++++++++++++++++++++++++++++ bin/pos-cli-exec-liquid.js | 28 ++++++++++++++++++++++++++++ test/exec-graphql.test.js | 22 ++++++++++++++++++++++ test/exec-liquid.test.js | 22 ++++++++++++++++++++++ 4 files changed, 100 insertions(+) diff --git a/bin/pos-cli-exec-graphql.js b/bin/pos-cli-exec-graphql.js index 5f0e548e..f0d84fc6 100644 --- a/bin/pos-cli-exec-graphql.js +++ b/bin/pos-cli-exec-graphql.js @@ -1,10 +1,30 @@ #!/usr/bin/env node const { program } = require('commander'); +const prompts = require('prompts'); const Gateway = require('../lib/proxy'); const fetchAuthData = require('../lib/settings').fetchSettings; const logger = require('../lib/logger'); +const isProductionEnvironment = (environment) => { + return environment && (environment.toLowerCase().includes('prod') || environment.toLowerCase().includes('production')); +}; + +const confirmProductionExecution = async (environment) => { + logger.Warn(`WARNING: You are executing GraphQL on a production environment: ${environment}`); + logger.Warn('This could potentially modify production data or cause unintended side effects.'); + logger.Warn(''); + + const response = await prompts({ + type: 'confirm', + name: 'confirmed', + message: `Are you sure you want to continue executing on ${environment}?`, + initial: false + }); + + return response.confirmed; +}; + program .name('pos-cli exec graphql') .argument('', 'name of environment. 
Example: staging') @@ -13,6 +33,14 @@ program const authData = fetchAuthData(environment, program); const gateway = new Gateway(authData); + if (isProductionEnvironment(environment)) { + const confirmed = await confirmProductionExecution(environment); + if (!confirmed) { + logger.Info('Execution cancelled.'); + process.exit(0); + } + } + try { const response = await gateway.graph({ query: graphql }); diff --git a/bin/pos-cli-exec-liquid.js b/bin/pos-cli-exec-liquid.js index ae7fb3ca..0947277f 100644 --- a/bin/pos-cli-exec-liquid.js +++ b/bin/pos-cli-exec-liquid.js @@ -1,10 +1,30 @@ #!/usr/bin/env node const { program } = require('commander'); +const prompts = require('prompts'); const Gateway = require('../lib/proxy'); const fetchAuthData = require('../lib/settings').fetchSettings; const logger = require('../lib/logger'); +const isProductionEnvironment = (environment) => { + return environment && (environment.toLowerCase().includes('prod') || environment.toLowerCase().includes('production')); +}; + +const confirmProductionExecution = async (environment) => { + logger.Warn(`WARNING: You are executing liquid code on a production environment: ${environment}`); + logger.Warn('This could potentially modify production data or cause unintended side effects.'); + logger.Warn(''); + + const response = await prompts({ + type: 'confirm', + name: 'confirmed', + message: `Are you sure you want to continue executing on ${environment}?`, + initial: false + }); + + return response.confirmed; +}; + program .name('pos-cli exec liquid') .argument('', 'name of environment. Example: staging') @@ -13,6 +33,14 @@ program const authData = fetchAuthData(environment, program); const gateway = new Gateway(authData); + if (isProductionEnvironment(environment)) { + const confirmed = await confirmProductionExecution(environment); + if (!confirmed) { + logger.Info('Execution cancelled.'); + process.exit(0); + } + } + try { const response = await gateway.liquid({ content: code }); diff --git a/test/exec-graphql.test.js b/test/exec-graphql.test.js index ac922abb..2763a5a9 100644 --- a/test/exec-graphql.test.js +++ b/test/exec-graphql.test.js @@ -67,6 +67,28 @@ describe('exec graphql CLI', () => { expect(code).toEqual(1); expect(stderr).toMatch("error: missing required argument 'graphql'"); }); + + test('cancels execution on production environment when user says no', async () => { + const { code, stdout, stderr } = await exec(`echo "n" | ${cliPath} exec graphql production "{ records { results { id } } }"`, { env }); + + expect(code).toEqual(0); + expect(stdout).toMatch('Execution cancelled.'); + }); + + test('proceeds with execution on production environment when user confirms', async () => { + const { code, stdout, stderr } = await exec(`echo "y" | ${cliPath} exec graphql production "{ records { results { id } } }"`, { env }); + + // This will fail because the mock API isn't set up, but we want to check it doesn't cancel + expect(stdout).not.toMatch('Execution cancelled.'); + expect(stderr).not.toMatch('Execution cancelled.'); + }); + + test('does not prompt for non-production environments', async () => { + const { code, stdout, stderr } = await exec(`${cliPath} exec graphql staging "{ records { results { id } } }"`, { env }); + + expect(stdout).not.toMatch('WARNING: You are executing GraphQL on a production environment'); + expect(stdout).not.toMatch('Execution cancelled.'); + }); }); // Integration test - requires real platformOS instance diff --git a/test/exec-liquid.test.js b/test/exec-liquid.test.js index 
09144c39..a8f64449 100644 --- a/test/exec-liquid.test.js +++ b/test/exec-liquid.test.js @@ -50,6 +50,28 @@ describe('exec liquid CLI', () => { expect(code).toEqual(1); expect(stderr).toMatch("error: missing required argument 'code'"); }); + + test('cancels execution on production environment when user says no', async () => { + const { code, stdout, stderr } = await exec(`echo "n" | ${cliPath} exec liquid production "{{ 'hello' | upcase }}"`, { env }); + + expect(code).toEqual(0); + expect(stdout).toMatch('Execution cancelled.'); + }); + + test('proceeds with execution on production environment when user confirms', async () => { + const { code, stdout, stderr } = await exec(`echo "y" | ${cliPath} exec liquid production "{{ 'hello' | upcase }}"`, { env }); + + // This will fail because the mock API isn't set up, but we want to check it doesn't cancel + expect(stdout).not.toMatch('Execution cancelled.'); + expect(stderr).not.toMatch('Execution cancelled.'); + }); + + test('does not prompt for non-production environments', async () => { + const { code, stdout, stderr } = await exec(`${cliPath} exec liquid staging "{{ 'hello' | upcase }}"`, { env }); + + expect(stdout).not.toMatch('WARNING: You are executing liquid code on a production environment'); + expect(stdout).not.toMatch('Execution cancelled.'); + }); }); // Integration test - requires real platformOS instance From f9defba03d23f6c77af0d16a7ea0c7b184d63891 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Thu, 15 Jan 2026 17:35:36 +0100 Subject: [PATCH 4/9] add pos-cli test run command --- bin/pos-cli-test-run.js | 517 +++++++++++++++ bin/pos-cli-test.js | 10 + bin/pos-cli.js | 1 + lib/proxy.js | 8 + package.json | 4 +- .../correct_with_assets/app/assets/bar.js | 8 + .../app/lib/test/debug_log_test.liquid | 11 + .../app/lib/test/example_test.liquid | 4 + .../app/lib/test/failing_test.liquid | 4 + .../with-tests-module/app/pos-modules.json | 5 + .../app/pos-modules.lock.json | 5 + .../app/views/pages/index.liquid | 4 + .../public/graphql/sent_mails/search.graphql | 20 + .../public/graphql/test_files/count.graphql | 12 + .../public/graphql/test_files/search.graphql | 15 + .../tests/public/lib/assertions/blank.liquid | 7 + .../tests/public/lib/assertions/equal.liquid | 9 + .../lib/assertions/invalid_object.liquid | 7 + .../public/lib/assertions/not_presence.liquid | 7 + .../public/lib/assertions/not_true.liquid | 10 + .../lib/assertions/not_valid_object.liquid | 7 + .../assertions/object_contains_object.liquid | 20 + .../public/lib/assertions/presence.liquid | 7 + .../tests/public/lib/assertions/true.liquid | 10 + .../public/lib/assertions/valid_object.liquid | 8 + .../tests/public/lib/commands/run.liquid | 46 ++ .../public/lib/helpers/register_error.liquid | 27 + .../public/lib/queries/sent_mails/find.liquid | 8 + .../lib/queries/sent_mails/search.liquid | 4 + .../tests/public/translations/en/should.yml | 16 + .../public/views/layouts/mailer.html.liquid | 165 +++++ .../tests/public/views/layouts/test.liquid | 154 +++++ .../views/pages/_tests/index.html.liquid | 10 + .../public/views/pages/_tests/index.js.liquid | 28 + .../public/views/pages/_tests/run.html.liquid | 11 + .../public/views/pages/_tests/run.js.liquid | 13 + .../views/pages/_tests/run_async.js.liquid | 13 + .../views/pages/_tests/run_async.liquid | 10 + .../pages/_tests/sent_mails/index.liquid | 11 + .../views/pages/_tests/sent_mails/show.liquid | 11 + .../views/partials/sent_mails/list.liquid | 20 + .../partials/sent_mails/pagination.liquid | 66 ++ 
.../views/partials/sent_mails/show.liquid | 8 + .../public/views/partials/tests/index.liquid | 18 + .../views/partials/tests/show_html.liquid | 25 + .../views/partials/tests/show_js.liquid | 30 + .../views/partials/tests/show_log.liquid | 7 + .../views/partials/tests/show_log_js.liquid | 8 + .../views/partials/tests/show_text.liquid | 22 + .../partials/tests/test_report_html.liquid | 17 + .../partials/tests/test_report_text.liquid | 5 + .../modules/tests/template-values.json | 7 + .../without-tests-module/app/pos-modules.json | 3 + test/test-run.test.js | 87 +++ test/test.test.js | 612 ++++++++++++++++++ 55 files changed, 2181 insertions(+), 1 deletion(-) create mode 100755 bin/pos-cli-test-run.js create mode 100755 bin/pos-cli-test.js create mode 100644 test/fixtures/test/with-tests-module/app/lib/test/debug_log_test.liquid create mode 100644 test/fixtures/test/with-tests-module/app/lib/test/example_test.liquid create mode 100644 test/fixtures/test/with-tests-module/app/lib/test/failing_test.liquid create mode 100644 test/fixtures/test/with-tests-module/app/pos-modules.json create mode 100644 test/fixtures/test/with-tests-module/app/pos-modules.lock.json create mode 100644 test/fixtures/test/with-tests-module/app/views/pages/index.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/graphql/sent_mails/search.graphql create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/count.graphql create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/search.graphql create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/blank.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/equal.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/invalid_object.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_presence.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_true.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_valid_object.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/object_contains_object.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/presence.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/true.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/valid_object.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/commands/run.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/helpers/register_error.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/find.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/search.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/translations/en/should.yml create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/mailer.html.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/test.liquid create mode 100644 
test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.html.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.js.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.html.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.js.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.js.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/index.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/show.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/list.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/pagination.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/show.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/index.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_html.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_js.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log_js.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_text.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_html.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_text.liquid create mode 100644 test/fixtures/test/with-tests-module/modules/tests/template-values.json create mode 100644 test/fixtures/test/without-tests-module/app/pos-modules.json create mode 100644 test/test-run.test.js create mode 100644 test/test.test.js diff --git a/bin/pos-cli-test-run.js b/bin/pos-cli-test-run.js new file mode 100755 index 00000000..23efe39f --- /dev/null +++ b/bin/pos-cli-test-run.js @@ -0,0 +1,517 @@ +#!/usr/bin/env node + +const EventEmitter = require('events'); +const { program } = require('commander'); +const chalk = require('chalk'); +const Gateway = require('../lib/proxy'); +const fetchAuthData = require('../lib/settings').fetchSettings; +const logger = require('../lib/logger'); + +const formatDuration = (ms) => { + if (ms < 1000) return `${ms}ms`; + if (ms < 60000) return `${(ms / 1000).toFixed(2)}s`; + const minutes = Math.floor(ms / 60000); + const seconds = ((ms % 60000) / 1000).toFixed(2); + return `${minutes}m ${seconds}s`; +}; + +class TestLogStream extends EventEmitter { + constructor(authData, timeout = 30000, testRunId = null, testName = null) { + super(); + this.authData = authData; + this.gateway = new Gateway(authData); + this.timeout = timeout; + this.testRunId = testRunId; + this.testName = testName; // The test_name from run_async.js (e.g., "liquid_test_xxxxx") + this.startTime = Date.now(); + this.testStarted = false; 
+ this.completed = false; + this.messageBuffer = ''; // Buffer for multi-line messages + this.lastMessageTime = 0; + this.liquidTestSeen = false; + this.liquidTestTime = 0; + } + + isValidTestSummaryJson(message) { + try { + // Parse as JSON + const obj = JSON.parse(message); + + // Check if it has test summary structure + const hasTestsArray = Array.isArray(obj.tests); + const hasSuccessField = typeof obj.success === 'boolean'; + const hasTotalField = typeof obj.total_tests === 'number' || typeof obj.total === 'number'; + const hasDurationField = typeof obj.duration_ms === 'number' || typeof obj.duration === 'number'; + + // If we have a testRunId, check that it matches + if (this.testRunId && obj.test_run_id !== this.testRunId) { + return false; + } + + return hasTestsArray && hasSuccessField && (hasTotalField || hasDurationField); + } catch (e) { + return false; + } + } + + start() { + this.intervalId = setInterval(() => this.fetchLogs(), 2000); + this.timeoutId = setTimeout(() => { + this.emit('timeout'); + this.stop(); + }, this.timeout); + + logger.Debug('Starting test log streaming...'); + } + + stop() { + if (this.intervalId) { + clearInterval(this.intervalId); + this.intervalId = null; + } + if (this.timeoutId) { + clearTimeout(this.timeoutId); + this.timeoutId = null; + } + } + + fetchLogs() { + this.gateway.logs({ lastId: this.lastId || 0 }) + .then((response) => { + const logs = response && response.logs; + if (!logs) return; + + for (let k in logs) { + const row = logs[k]; + + if (this.lastId && row.id <= this.lastId) continue; + this.lastId = row.id; + + logger.Debug(`[DEBUG] Processing log entry: ${JSON.stringify(row)}`); + this.processLogMessage(row); + } + }) + .catch(error => { + logger.Debug(`Error fetching logs: ${error.message}`); + }); + } + + processLogMessage(row) { + const message = row.message || ''; + const logType = row.error_type || ''; + const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); + const summaryType = this.testName ? 
`${this.testName} SUMMARY` : null; + + // Only process logs that are related to our test run + // If we have a testName, filter by it + if (this.testName) { + // Check for test start - look for "Starting unit tests" or "Starting test run" with matching type + if (logType === this.testName && (fullMessage.includes('Starting unit tests') || fullMessage.includes('Starting test run')) && !this.testStarted) { + this.testStarted = true; + this.emit('testStarted'); + return; // Don't emit this as a log + } + + // Check for test completion - look for log with type " SUMMARY" + if (!this.completed && logType === summaryType && this.isValidTestSummaryJson(fullMessage)) { + if (!this.liquidTestSeen) { + this.liquidTestSeen = true; + this.liquidTestTime = Date.now(); + } + + // Parse as JSON summary + const testResults = this.parseJsonSummary(fullMessage); + if (testResults) { + this.completed = true; + this.emit('testCompleted', testResults); + this.stop(); + return; + } + } + + // Only show logs after test started and before completion + if (this.testStarted && !this.completed) { + // Determine if this is a test log (type matches test_name) or debug log (any other type) + const isTestLog = logType === this.testName; + this.emit('testLog', row, isTestLog); + } + } else { + // Legacy behavior when testName is not available + // Check for test start + if (fullMessage.includes('Starting unit tests') && !this.testStarted) { + this.testStarted = true; + this.emit('testStarted'); + } + + // Check for test completion - look for the JSON summary format (tests module 1.1.1+) + if (!this.completed && this.isValidTestSummaryJson(fullMessage)) { + if (!this.liquidTestSeen) { + this.liquidTestSeen = true; + this.liquidTestTime = Date.now(); + } + + // Parse as JSON summary + const testResults = this.parseJsonSummary(fullMessage); + if (testResults) { + this.completed = true; + this.emit('testCompleted', testResults); + this.stop(); + return; + } + } + + // Also show individual test logs + if (this.testStarted && !this.completed) { + this.emit('testLog', row, true); + } + } + } + + parseJsonSummary(message) { + try { + // Parse JSON (already validated as valid by isValidTestSummaryJson) + const summary = JSON.parse(message); + + // Map fields from tests module format to our internal format + const total = summary.total_tests || summary.total || 0; + const assertions = summary.total_assertions || summary.assertions || 0; + const duration = summary.duration_ms || summary.duration || 0; + + // Calculate passed/failed from success flag + let passed = 0; + let failed = 0; + + if (summary.success === true) { + passed = total; + failed = summary.total_errors || 0; + } else if (summary.success === false) { + failed = summary.total_errors || 0; + passed = Math.max(0, total - failed); + } + + // Map individual tests + const tests = []; + if (summary.tests && Array.isArray(summary.tests)) { + summary.tests.forEach(test => { + const testItem = { + name: test.name || 'Unknown test', + status: test.success ? 
'passed' : 'failed', + passed: test.success, + assertions: test.assertions + }; + + // Handle errors - could be object with error details or array + if (test.errors) { + if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { + testItem.error = JSON.stringify(test.errors); + } else if (Array.isArray(test.errors) && test.errors.length > 0) { + testItem.errors = test.errors; + } + } + + tests.push(testItem); + }); + } + + return { + total, + passed, + failed, + assertions, + tests, + duration + }; + } catch (error) { + logger.Debug(`[DEBUG] Failed to parse JSON summary: ${error.message}`); + return null; + } + } + + +} + +const printTestResults = (results, duration) => { + const { passed = 0, failed = 0, total = 0, tests = [] } = results; + + if (tests && tests.length > 0) { + logger.Info('\nTest Results:', { hideTimestamp: true }); + logger.Info('─'.repeat(60), { hideTimestamp: true }); + + tests.forEach(test => { + const status = test.status || (test.passed ? 'passed' : 'failed'); + const icon = status === 'passed' ? '✓' : '✗'; + const name = test.name || test.test_name || 'Unknown test'; + + if (status === 'passed') { + logger.Success(` ${icon} ${name}`, { hideTimestamp: true }); + } else { + logger.Error(` ${icon} ${name}`, { hideTimestamp: true, exit: false, notify: false }); + if (test.error || test.message) { + logger.Error(` Error: ${test.error || test.message}`, { hideTimestamp: true, exit: false, notify: false }); + } + if (test.errors && Array.isArray(test.errors)) { + test.errors.forEach(err => { + logger.Error(` - ${err.message || err}`, { hideTimestamp: true, exit: false, notify: false }); + }); + } + } + }); + + logger.Info('─'.repeat(60), { hideTimestamp: true }); + } + + // Print summary + const totalTests = total || (passed + failed); + const summary = []; + if (passed > 0) summary.push(`${passed} passed`); + if (failed > 0) summary.push(`${failed} failed`); + + const summaryText = summary.length > 0 ? summary.join(', ') : 'No tests executed'; + const durationText = duration ? ` in ${formatDuration(duration)}` : ''; + + if (failed > 0) { + logger.Error(`\n${summaryText} (${totalTests} total)${durationText}`, { hideTimestamp: true, exit: false, notify: false }); + } else if (passed > 0) { + logger.Success(`\n${summaryText} (${totalTests} total)${durationText}`, { hideTimestamp: true }); + } else { + logger.Warn(`\n${summaryText}${durationText}`, { hideTimestamp: true }); + } + + return failed === 0; +}; + +const transformTestResponse = (response) => { + // Transform API response format to the format expected by printTestResults + // API returns: { success, total_tests, total_assertions, total_errors, duration_ms, tests: [{name, success, assertions, errors}] } + // printTestResults expects: { passed, failed, total, tests: [{name, status, passed, error}], duration } + + const total = response.total_tests || response.total || 0; + const totalErrors = response.total_errors || 0; + + let passed = 0; + let failed = 0; + + if (response.success === true) { + passed = total; + failed = totalErrors; + } else { + failed = totalErrors || (total > 0 ? 1 : 0); + passed = Math.max(0, total - failed); + } + + const tests = []; + if (response.tests && Array.isArray(response.tests)) { + response.tests.forEach(test => { + const testItem = { + name: test.name || 'Unknown test', + status: test.success ? 
'passed' : 'failed', + passed: test.success, + assertions: test.assertions + }; + + // Handle errors - could be object with error details or array + if (test.errors) { + if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { + testItem.error = JSON.stringify(test.errors); + } else if (Array.isArray(test.errors) && test.errors.length > 0) { + testItem.errors = test.errors; + } + } + + tests.push(testItem); + }); + } + + return { + total, + passed, + failed, + assertions: response.total_assertions || 0, + tests, + duration: response.duration_ms || response.duration || 0 + }; +}; + +const runSingleTest = async (gateway, name) => { + const startTime = Date.now(); + + try { + const response = await gateway.test(name); + const duration = Date.now() - startTime; + + if (!response) { + logger.Error('No response received from test endpoint'); + return false; + } + + // Handle error response (not test failure, but actual error) + if (response.error && !response.tests) { + logger.Error(`Test error: ${response.error}`); + return false; + } + + // Handle the JSON response from /_tests/run.js + if (typeof response === 'object') { + const transformedResults = transformTestResponse(response); + return printTestResults(transformedResults, transformedResults.duration || duration); + } + + // Fallback for unexpected response format + logger.Print(JSON.stringify(response, null, 2)); + return true; + } catch (error) { + // Handle HTTP 500 errors that contain valid test results + // The test endpoint returns 500 when tests fail, but includes results in the body + const errorMessage = error.message || ''; + const jsonMatch = errorMessage.match(/^(\d+)\s*-\s*(\{.+\})$/); + + if (jsonMatch) { + try { + const response = JSON.parse(jsonMatch[2]); + if (response.tests && Array.isArray(response.tests)) { + const transformedResults = transformTestResponse(response); + return printTestResults(transformedResults, transformedResults.duration); + } + } catch (parseError) { + // Fall through to generic error handling + } + } + + logger.Error(`Failed to execute test: ${error.message}`); + return false; + } +}; + +const formatTestLog = (logRow, isTestLog) => { + const message = logRow.message || ''; + const logType = logRow.type || ''; + const fullMessage = typeof message === 'string' ? 
message : JSON.stringify(message); + const cleanMessage = fullMessage.replace(/\n$/, ''); + + // Check if this message contains a path to a test file + const hasTestPath = /app\/lib\/test\/|modules\/.*\/test\/|\.liquid/.test(cleanMessage); + + if (isTestLog && hasTestPath) { + // Test log with path - highlight it (new test indicator) + return chalk.cyan.bold(`▶ ${cleanMessage}`); + } else if (isTestLog) { + // Test log without path - normal display + return chalk.white(` ${cleanMessage}`); + } else { + // Debug log (type != test_name) - dim display + return chalk.dim(` [debug:${logType}] ${cleanMessage}`); + } +}; + +const runAllTests = async (gateway, authData) => { + return new Promise((resolve, reject) => { + let resolved = false; + + // Start the test run and get the test_run_id and test_name + logger.Info('Starting test run...'); + gateway.testRunAsync().then(testRunResponse => { + const testRunId = testRunResponse && testRunResponse.test_run_id; + const testName = testRunResponse && testRunResponse.test_name; + + logger.Debug(`Test run started with test_name: ${testName}`); + + const stream = new TestLogStream(authData, 180000, testRunId, testName); // 3 minute timeout for async tests + + const finish = (result) => { + if (resolved) return; // Prevent multiple resolutions + resolved = true; + stream.stop(); + resolve(result); + }; + + stream.on('testStarted', () => { + logger.Info('Test execution started...', { hideTimestamp: true }); + }); + + stream.on('testLog', (logRow, isTestLog) => { + // Display individual test logs with syntax highlighting + const message = logRow.message || ''; + const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); + + // Don't show JSON summary messages as logs - they will be processed as completion signals + if (stream.isValidTestSummaryJson(fullMessage)) { + return; + } + + // Format and display the log with appropriate highlighting + const formattedLog = formatTestLog(logRow, isTestLog); + console.log(formattedLog); + }); + + stream.on('testCompleted', (results) => { + logger.Info('Test execution completed, processing results...', { hideTimestamp: true }); + const success = printTestResults(results, results.duration); + finish(success); + }); + + stream.on('timeout', () => { + logger.Error('Test execution timed out - no completion message received within 3 minutes'); + finish(false); + }); + + // Start listening for logs + stream.start(); + }).catch(error => { + logger.Error(`Failed to start test execution: ${error.message}`); + resolve(false); + }); + }); +}; + +program + .name('pos-cli test run') + .argument('', 'name of environment. Example: staging') + .argument('[name]', 'name of the test to execute (runs all tests if not provided)') + .action(async (environment, name) => { + const authData = fetchAuthData(environment, program); + const gateway = new Gateway(authData); + + try { + // Display the instance URL for clarity + logger.Info(`Running tests on: ${authData.url}`, { hideTimestamp: true }); + // First check if tests module is installed + const modules = await gateway.listModules(); + const hasTestsModule = modules.data && modules.data.some(module => module === 'tests'); + + if (!hasTestsModule) { + logger.Error(`Tests module not found. 
Please install the tests module: + pos-cli modules install tests + pos-cli deploy ${environment} +Then re-run the command.`); + process.exit(1); + } + + let success; + if (name) { + // Run single test with .js format + success = await runSingleTest(gateway, name); + } else { + // Run all tests via run_async with log streaming + success = await runAllTests(gateway, authData); + } + + process.exit(success ? 0 : 1); + } catch (error) { + logger.Error(`Failed to execute test: ${error.message}`); + process.exit(1); + } + }); + +// Only parse arguments if this file is run directly, not when required for testing +if (require.main === module) { + program.parse(process.argv); +} + +// Export for testing +module.exports = { + TestLogStream, + formatDuration, + formatTestLog, + printTestResults, + runAllTests +}; diff --git a/bin/pos-cli-test.js b/bin/pos-cli-test.js new file mode 100755 index 00000000..19a51cd8 --- /dev/null +++ b/bin/pos-cli-test.js @@ -0,0 +1,10 @@ +#!/usr/bin/env node + +const { program } = require('commander'); + +program.showHelpAfterError(); +program + .name('pos-cli test') + .command('run [name]', 'run tests on instance (all tests if name not provided)') + .command('list ', 'list available tests on instance') + .parse(process.argv); diff --git a/bin/pos-cli.js b/bin/pos-cli.js index 1463d9b9..ed111314 100755 --- a/bin/pos-cli.js +++ b/bin/pos-cli.js @@ -34,5 +34,6 @@ program .command('modules', 'manage modules') .command('pull', 'export app data to a zip file') .command('sync ', 'update environment on file change').alias('s') + .command('test', 'run tests on instance') .command('uploads', 'manage uploads files') .parse(process.argv); diff --git a/lib/proxy.js b/lib/proxy.js index d09a53f0..4aa997d5 100644 --- a/lib/proxy.js +++ b/lib/proxy.js @@ -126,6 +126,14 @@ class Gateway { return this.apiRequest({ method: 'POST', uri: `${this.api_url}/liquid_exec`, json, forever: true }); } + test(name) { + return this.apiRequest({ uri: `${this.url}/_tests/run.js?name=${name}` }); + } + + testRunAsync() { + return this.apiRequest({ uri: `${this.url}/_tests/run_async` }); + } + listModules() { return this.apiRequest({ uri: `${this.api_url}/installed_modules` }); } diff --git a/package.json b/package.json index ac39bac6..e002cfbc 100644 --- a/package.json +++ b/package.json @@ -79,7 +79,9 @@ "pos-cli-logsv2-search": "bin/pos-cli-logsv2-search.js", "pos-cli-migrations": "bin/pos-cli-migrations.js", "pos-cli-modules": "bin/pos-cli-modules.js", - "pos-cli-sync": "bin/pos-cli-sync.js" + "pos-cli-sync": "bin/pos-cli-sync.js", + "pos-cli-test": "bin/pos-cli-test.js", + "pos-cli-test-run": "bin/pos-cli-test-run.js" }, "repository": "platform-OS/pos-cli", "license": "CC BY 3.0", diff --git a/test/fixtures/deploy/correct_with_assets/app/assets/bar.js b/test/fixtures/deploy/correct_with_assets/app/assets/bar.js index a693db7f..7f517ba1 100644 --- a/test/fixtures/deploy/correct_with_assets/app/assets/bar.js +++ b/test/fixtures/deploy/correct_with_assets/app/assets/bar.js @@ -1 +1,9 @@ // Test asset file +x +x +x +x +x +x +x +x diff --git a/test/fixtures/test/with-tests-module/app/lib/test/debug_log_test.liquid b/test/fixtures/test/with-tests-module/app/lib/test/debug_log_test.liquid new file mode 100644 index 00000000..e484d1c2 --- /dev/null +++ b/test/fixtures/test/with-tests-module/app/lib/test/debug_log_test.liquid @@ -0,0 +1,11 @@ +{% liquid + assign var = '{"field": 1 }' | parse_json + + # This debug log has a different type than the test_name, so it should be dimmed + log 'Debug: checking 
variable value', type: 'debug' + + include 'modules/tests/assertions/equal', contract: contract, given: var.field, expected: 1, field_name: 'field' + + # Another debug log with custom type + log 'Custom debug message from test', type: 'custom_debug' +%} diff --git a/test/fixtures/test/with-tests-module/app/lib/test/example_test.liquid b/test/fixtures/test/with-tests-module/app/lib/test/example_test.liquid new file mode 100644 index 00000000..39fc990d --- /dev/null +++ b/test/fixtures/test/with-tests-module/app/lib/test/example_test.liquid @@ -0,0 +1,4 @@ +{% liquid + assign var = '{"field": 1 }' | parse_json + include 'modules/tests/assertions/equal', contract: contract, given: var.field, expected: 1, field_name: 'field' +%} \ No newline at end of file diff --git a/test/fixtures/test/with-tests-module/app/lib/test/failing_test.liquid b/test/fixtures/test/with-tests-module/app/lib/test/failing_test.liquid new file mode 100644 index 00000000..17c5c587 --- /dev/null +++ b/test/fixtures/test/with-tests-module/app/lib/test/failing_test.liquid @@ -0,0 +1,4 @@ +{% liquid + assign var = '{"field": 1 }' | parse_json + include 'modules/tests/assertions/equal', contract: contract, given: var.field, expected: 2, field_name: 'field' +%} diff --git a/test/fixtures/test/with-tests-module/app/pos-modules.json b/test/fixtures/test/with-tests-module/app/pos-modules.json new file mode 100644 index 00000000..ee3202bf --- /dev/null +++ b/test/fixtures/test/with-tests-module/app/pos-modules.json @@ -0,0 +1,5 @@ +{ + "modules": { + "tests": "1.2.0" + } +} \ No newline at end of file diff --git a/test/fixtures/test/with-tests-module/app/pos-modules.lock.json b/test/fixtures/test/with-tests-module/app/pos-modules.lock.json new file mode 100644 index 00000000..ee3202bf --- /dev/null +++ b/test/fixtures/test/with-tests-module/app/pos-modules.lock.json @@ -0,0 +1,5 @@ +{ + "modules": { + "tests": "1.2.0" + } +} \ No newline at end of file diff --git a/test/fixtures/test/with-tests-module/app/views/pages/index.liquid b/test/fixtures/test/with-tests-module/app/views/pages/index.liquid new file mode 100644 index 00000000..ddae8958 --- /dev/null +++ b/test/fixtures/test/with-tests-module/app/views/pages/index.liquid @@ -0,0 +1,4 @@ +--- +slug: test-fixture-index +--- +Test fixture placeholder page diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/graphql/sent_mails/search.graphql b/test/fixtures/test/with-tests-module/modules/tests/public/graphql/sent_mails/search.graphql new file mode 100644 index 00000000..7ea4a0b2 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/graphql/sent_mails/search.graphql @@ -0,0 +1,20 @@ +query mails($id: ID, $limit: Int = 20, $page: Int = 1) { + mails: admin_sent_notifications( + per_page: $limit + page: $page + filter: { id: { value: $id }, notification_type: { value: EMAIL } } + sort: { created_at: { order: DESC } } + ) { + total_entries + total_pages + current_page + has_previous_page + has_next_page + results { + id + created_at + content + options + } + } +} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/count.graphql b/test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/count.graphql new file mode 100644 index 00000000..d507b054 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/count.graphql @@ -0,0 +1,12 @@ +query count_test_partials($path: String, $per_page: Int!){ + admin_liquid_partials( + per_page: $per_page + filter: { 
+ path: { ends_with: "_test", contains: $path } + } + + ) { + total_entries + total_pages + } +} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/search.graphql b/test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/search.graphql new file mode 100644 index 00000000..0b6bf71b --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/graphql/test_files/search.graphql @@ -0,0 +1,15 @@ +query test_partials($path: String, $per_page: Int = 100, $page: Int = 1){ + admin_liquid_partials( + per_page: $per_page + page: $page + filter: { + path: { ends_with: "_test", contains: $path } + } + + ) { + total_entries + results { + path + } + } +} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/blank.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/blank.liquid new file mode 100644 index 00000000..3a83aff5 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/blank.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + unless object[field_name] == blank + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.be_blank', message: null + endunless + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/equal.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/equal.liquid new file mode 100644 index 00000000..33a4401e --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/equal.liquid @@ -0,0 +1,9 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + if given != expected + assign msg = 'modules/tests/should.equal' | t: given: given, expected: expected + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, message: msg + endif + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/invalid_object.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/invalid_object.liquid new file mode 100644 index 00000000..ed73da97 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/invalid_object.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object.valid + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, message: object.errors + endif + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_presence.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_presence.liquid new file mode 100644 index 00000000..f9806eb6 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_presence.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object[field_name] != blank + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_blank' + endif + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_true.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_true.liquid new 
file mode 100644 index 00000000..61b055ac --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_true.liquid @@ -0,0 +1,10 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + assign value = value | default: object[field_name] + if value + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_true' + endif + + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_valid_object.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_valid_object.liquid new file mode 100644 index 00000000..5268eb4c --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/not_valid_object.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object.valid == true + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_valid' + endif + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/object_contains_object.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/object_contains_object.liquid new file mode 100644 index 00000000..15eec117 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/object_contains_object.liquid @@ -0,0 +1,20 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + for property in object_contains + assign key = property[0] + assign value = property[1] + + if given[key] == blank + assign message = 'modules/tests/should.have_key' | t: field_name: field_name + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: key, message: message + else + if given[key] != value + assign message = 'modules/tests/should.have_key_with_value' | t: value: value + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: key, message: message + endif + endif + endfor + + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/presence.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/presence.liquid new file mode 100644 index 00000000..10f1ab7e --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/presence.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object[field_name] == blank + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_blank' + endif + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/true.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/true.liquid new file mode 100644 index 00000000..daac6d3b --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/true.liquid @@ -0,0 +1,10 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + assign value = value | default: object[field_name] + unless value + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.be_true' + endunless + + return contract +%} diff --git 
a/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/valid_object.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/valid_object.liquid new file mode 100644 index 00000000..a4c44f2b --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/assertions/valid_object.liquid @@ -0,0 +1,8 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object.valid != true + assign message = 'should be valid: ' | append: object.errors + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, message: message + endif + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/commands/run.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/commands/run.liquid new file mode 100644 index 00000000..87930c7d --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/commands/run.liquid @@ -0,0 +1,46 @@ +{% liquid + assign ctx = context + hash_assign ctx['tests'] = true + log 'Starting unit tests', type: test_name + assign __start = "now" | to_time + assign per_page = 100 + graphql total_pages = 'modules/tests/test_files/count', per_page: per_page, path: context.params.name | dig: "admin_liquid_partials" | dig: "total_pages" + + if tests.size == 0 + unless format == 'js' + echo 'no tests found' + endunless + endif + assign total_errors = 0 + assign contracts = '' | split: ',' + + for page in (1..total_pages) + graphql tests = 'modules/tests/test_files/search', path: context.params.name, page: page, per_page: per_page | dig: "admin_liquid_partials" | dig: "results" + for test in tests + log test, type: test_name + assign contract = '{ "errors": {}, "success": true, "total": 0 }' | parse_json + + # platformos-check-disable ConvertIncludeToRender + include test.path, registry: test.path, contract: contract + # platformos-check-enable ConvertIncludeToRender + hash_assign contract['test_path'] = test.path + assign contracts = contracts | add_to_array: contract + assign total_errors = total_errors | plus: contract.errors.size + endfor + endfor + assign __stop = "now" | to_time + assign total_duration = __start | time_diff: __stop, 'ms' | round + + assign data = '{}' | parse_json + hash_assign data['contracts'] = contracts + hash_assign data['total_errors'] = total_errors + hash_assign data['total_duration'] = total_duration + + assign test_formatter = format | default: 'html' | prepend: 'modules/tests/tests/show_' + # platformos-check-disable ConvertIncludeToRender + include test_formatter, contracts: contracts, total_errors: total_errors, total_duration: total_duration, test_name: test_name + # platformos-check-enable ConvertIncludeToRender + if total_errors > 0 + response_status 500 + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/helpers/register_error.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/helpers/register_error.liquid new file mode 100644 index 00000000..11dfa1d1 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/helpers/register_error.liquid @@ -0,0 +1,27 @@ +{% comment %} + @params + contract - { errors: {}, success: true } + field_name + message: + key: i18n to be resolved into message +{% endcomment %} + +{% liquid + assign key = key | default: null + assign message = message | default: null + if key + assign msg = key | t + else + assign msg = message + endif + + assign errors = 
contract.errors + + assign field_erorrs = errors[field_name] | default: '[]' | parse_json + assign field_erorrs = field_erorrs | array_add: msg + + hash_assign errors[field_name] = field_erorrs + hash_assign contract['success'] = false + + return contract +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/find.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/find.liquid new file mode 100644 index 00000000..4ab4a057 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/find.liquid @@ -0,0 +1,8 @@ +{% liquid + if id == blank + return null + endif + + graphql r = 'modules/tests/sent_mails/search', id: id, limit: 1 + return r.mails.results.first +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/search.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/search.liquid new file mode 100644 index 00000000..066ec067 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/lib/queries/sent_mails/search.liquid @@ -0,0 +1,4 @@ +{% liquid + graphql r = 'modules/tests/sent_mails/search', limit: limit, page: page + return r.mails +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/translations/en/should.yml b/test/fixtures/test/with-tests-module/modules/tests/public/translations/en/should.yml new file mode 100644 index 00000000..6f40d6fc --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/translations/en/should.yml @@ -0,0 +1,16 @@ +en: + should: + be_false: should be false + be_valid: should be valid + equal: expected %{given} to equal %{expected} + equal_not_verbose: does not match + have_key: key should exist in "%{field_name}" + have_key_with_value: should have value "%{value}" + match: match + be_blank: should be blank + be_true: should be true + not: + be_empty: should not be empty + be_blank: should not be blank + be_valid: should not be valid + be_true: should not be true diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/mailer.html.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/mailer.html.liquid new file mode 100644 index 00000000..c33042aa --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/mailer.html.liquid @@ -0,0 +1,165 @@ + + + + + + + + + + + {% liquid + assign url = 'https://' | append: context.location.host + %} + +
+

+ +
+ {{ 'app.title' | t: default: 'App' }} +
+

+ + {{ content_for_layout }} + + +
+ + + diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/test.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/test.liquid new file mode 100644 index 00000000..6b57c725 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/layouts/test.liquid @@ -0,0 +1,154 @@ + + + + + + + +
+
+ {{ content_for_layout }} +
+
+ + diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.html.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.html.liquid new file mode 100644 index 00000000..5db7ce85 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.html.liquid @@ -0,0 +1,10 @@ +--- +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + graphql tests = 'modules/tests/test_files/search', path: context.params.name | dig: "admin_liquid_partials" | dig: "results" + + render 'modules/tests/tests/index', tests: tests + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.js.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.js.liquid new file mode 100644 index 00000000..07256730 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/index.js.liquid @@ -0,0 +1,28 @@ +--- +layout: '' +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign per_page = 100 + graphql total_pages = 'modules/tests/test_files/count', per_page: per_page, path: context.params.name | dig: "admin_liquid_partials" | dig: "total_pages" + + assign result = '[]' | parse_json + + for page in (1..total_pages) + graphql tests = 'modules/tests/test_files/search', path: context.params.name, page: page, per_page: per_page | dig: "admin_liquid_partials" | dig: "results" + + for test in tests + assign test_name = test.path | remove_first: 'lib/test/' | remove_first: '_test' + assign test_url = '/_tests/run.js?test_name=' | append: test_name + assign test_object = '{}' | parse_json + hash_assign test_object['name'] = test_name + hash_assign test_object['url'] = test_url + assign result = result | add_to_array: test_object + endfor + endfor + + echo result | json + else + echo '{"error":"Tests can only be accessed in staging or development environment"}' + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.html.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.html.liquid new file mode 100644 index 00000000..78ba8fb2 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.html.liquid @@ -0,0 +1,11 @@ +--- +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign test_name = 5 | random_string | prepend: "liquid_test_" + # platformos-check-disable ConvertIncludeToRender + include 'modules/tests/commands/run', format: context.params.formatter, test_name: test_name + # platformos-check-enable ConvertIncludeToRender + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.js.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.js.liquid new file mode 100644 index 00000000..d36090cc --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run.js.liquid @@ -0,0 +1,13 @@ +--- +layout: '' +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign test_name = 5 | random_string | prepend: "liquid_test_" + # platformos-check-disable ConvertIncludeToRender + include 'modules/tests/commands/run', format: 'js', test_name: 
test_name + # platformos-check-enable ConvertIncludeToRender + else + echo '{"success":false,"error":"Tests can only be run in staging or development environment"}' + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.js.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.js.liquid new file mode 100644 index 00000000..79956bd9 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.js.liquid @@ -0,0 +1,13 @@ +--- +layout: '' +--- +{% if context.environment == 'staging' or context.environment == 'development' %} + {% assign test_name = 5 | random_string | prepend: "liquid_test_" %} + {% background source_name: "liquid_tests", test_name: test_name %} + {% include 'modules/tests/commands/run', format: 'log_js', test_name: test_name %} + {% endbackground %} + {% assign result = '{}' | parse_json | hash_merge: test_name: test_name %} + {{ result }} +{% else %} + {"success":false,"error":"Tests can only be run in staging or development environment"} +{% endif %} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.liquid new file mode 100644 index 00000000..7cb08dfb --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/run_async.liquid @@ -0,0 +1,10 @@ +--- +layout: '' +--- +{% if context.environment == 'staging' %} + {% assign test_name = 5 | random_string | prepend: "liquid_test_" %} + {% background source_name: "liquid_tests", test_name: test_name %} + {% include 'modules/tests/commands/run', format: 'log', test_name: test_name %} + {% endbackground %} + {{ test_name }} +{% endif %} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/index.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/index.liquid new file mode 100644 index 00000000..2e5885b2 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/index.liquid @@ -0,0 +1,11 @@ +--- +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign page = context.params.page | to_positive_integer: 1 + function mails = 'modules/tests/queries/sent_mails/search', limit: 20, page: page + + render 'modules/tests/sent_mails/list', mails: mails + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/show.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/show.liquid new file mode 100644 index 00000000..5c612fe9 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/pages/_tests/sent_mails/show.liquid @@ -0,0 +1,11 @@ +--- +slug: _tests/sent_mails/:id +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + function mail = 'modules/tests/queries/sent_mails/find', id: context.params.id + + render 'modules/tests/sent_mails/show', mail: mail + endif +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/list.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/list.liquid new file mode 100644 index 00000000..eb8e2452 --- 
/dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/list.liquid @@ -0,0 +1,20 @@ +

Sent mails

+
+
+
Subject
+
To
+
Sent at
+
+
+
+ {% for mail in mails.results %} +
    +
{{ mail.options.subject }}
+
{{ mail.options.to | join: ',' }}
+
{{ mail.created_at | l }}
+
Show
+
+ {% endfor %} +
+
+ {% render 'modules/tests/sent_mails/pagination', collection: mails %} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/pagination.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/pagination.liquid new file mode 100644 index 00000000..1ecf01a8 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/pagination.liquid @@ -0,0 +1,66 @@ +{% comment %} + Required params: + collection: collection + current_page: integer + Optional params: + button_attrs: string + container_class: string +{% endcomment %} +{% liquid + assign container_class = container_class | default: "subtitle flex justify-center md:justify-end items-center mt-8 mx-auto md:mr-0 md:ms-auto" + assign button_attrs = button_attrs | default: '' | html_safe + assign current_page = collection.current_page | to_positive_integer: 1 + assign page_name = page_name | default: 'page' +%} + +{% if collection.has_previous_page or collection.has_next_page %} +
+ +
+ {% if collection.has_previous_page %} + + {% endif %} + + {% liquid + assign range_low = current_page | minus: 2 | at_least: 1 + assign range_high = range_low | plus: 4 | at_most: collection.total_pages + %} + {% for page_num in (range_low..range_high) %} + {% if page_num == current_page %} + {{ page_num }} + {% else %} + + {% endif %} + {% endfor %} + + {% if collection.has_next_page %} + + {% endif %} +
+
+{% endif %} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/show.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/show.liquid new file mode 100644 index 00000000..2fad3804 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/sent_mails/show.liquid @@ -0,0 +1,8 @@ +Back +

Sent mail

+

Subject: {{ mail.options.subject }}

+

To: {{ mail.options.to | join: ',' }}

+

Sent at: {{ mail.created_at | l }}

+
+ +{% print mail.content %} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/index.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/index.liquid new file mode 100644 index 00000000..424cd724 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/index.liquid @@ -0,0 +1,18 @@ + +
+
+
Test
+
+
+
+ {% for test in tests %} +
    + {% assign test_name = test.path | split: 'test/' | last %} +
{{ test.path }}
+
Run
+
+ {% endfor %} +
diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_html.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_html.liquid new file mode 100644 index 00000000..c2ce5684 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_html.liquid @@ -0,0 +1,25 @@ +
+{% assign total = 0 %} +{% liquid + for contract in contracts + render 'modules/tests/tests/test_report_html', name: contract.test_path, contract: contract + assign total = total | plus: contract.total + endfor +%} + +{% if total_errors > 0 %} +

Total errors: {{ total_errors }}

+ {% response_status 500 %} +{% endif %} + +
+ +

+ {% if total_errors > 0 %} + Failure. + {% else %} + Success. + {% endif %} +Assertions: {{ total }}. Failed: {{ total_errors }}. Time: {{ total_duration }}ms +

+
diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_js.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_js.liquid new file mode 100644 index 00000000..f9e4d755 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_js.liquid @@ -0,0 +1,30 @@ +{% liquid + assign result = '{}' | parse_json + assign total_assertions = 0 + + assign tests_array = '[]' | parse_json + for contract in contracts + assign total_assertions = total_assertions | plus: contract.total + + assign test_result = '{}' | parse_json + hash_assign test_result['name'] = contract.test_path + hash_assign test_result['success'] = contract.success + hash_assign test_result['assertions'] = contract.total + hash_assign test_result['errors'] = contract.errors + + assign tests_array = tests_array | add_to_array: test_result + endfor + + if total_errors > 0 + hash_assign result['success'] = false + else + hash_assign result['success'] = true + endif + + hash_assign result['total_tests'] = contracts.size + hash_assign result['total_assertions'] = total_assertions + hash_assign result['total_errors'] = total_errors + hash_assign result['duration_ms'] = total_duration + hash_assign result['tests'] = tests_array +%} +{{ result | json }} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log.liquid new file mode 100644 index 00000000..9abe2cc7 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log.liquid @@ -0,0 +1,7 @@ +{% capture result %} + {% render 'modules/tests/tests/show_text', contracts: contracts, total_errors: total_errors, total_duration: total_duration, test_name: test_name %} +{% endcapture %} +{% liquid + assign log_type = test_name | append: ' SUMMARY' + log result, type: log_type +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log_js.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log_js.liquid new file mode 100644 index 00000000..ffd51c6b --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_log_js.liquid @@ -0,0 +1,8 @@ +{% capture result %} + {% render 'modules/tests/tests/show_js', contracts: contracts, total_errors: total_errors, total_duration: total_duration, test_name: test_name %} +{% endcapture %} +{% assign result = result | html_safe %} +{% liquid + assign log_type = test_name | append: ' SUMMARY' + log result, type: log_type +%} diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_text.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_text.liquid new file mode 100644 index 00000000..3108c53b --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/show_text.liquid @@ -0,0 +1,22 @@ +Liquid tests +------------------------ +{% liquid + for contract in contracts + render 'modules/tests/tests/test_report_text', name: contract.test_path, contract: contract + assign total = total | plus: contract.total + endfor +%} +------------------------ +{% liquid + if total_errors > 0 + assign result = 'Failed' + else + assign result = 'Success' + endif +%} +{{ result }}_{{ test_name | strip }} +{% if total_errors > 0 %} + Total 
errors: {{ total_errors }} +{% endif %} + +Assertions: {{ total }}. Failed: {{ total_errors }}. Time: {{ total_duration }}ms diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_html.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_html.liquid new file mode 100644 index 00000000..2ff1a431 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_html.liquid @@ -0,0 +1,17 @@ +
+
+ {% assign test_name = name | replace: 'test/', '' %} + {{ test_name }} + + (run test) + +
+
+ {% for e in contract.errors %} +
+
{{ e[0] }}
+
{{ e[1] | join: ",
" | html_safe }}
+
+ {% endfor %} +
+
diff --git a/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_text.liquid b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_text.liquid new file mode 100644 index 00000000..29a84503 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/public/views/partials/tests/test_report_text.liquid @@ -0,0 +1,5 @@ +{% assign test_name = name | replace: 'test/', '' %} +{{ test_name }} +{% for e in contract.errors %} + {{ e[0] }} {{ e[1] | join: ", " }} +{% endfor %} diff --git a/test/fixtures/test/with-tests-module/modules/tests/template-values.json b/test/fixtures/test/with-tests-module/modules/tests/template-values.json new file mode 100644 index 00000000..59e310f0 --- /dev/null +++ b/test/fixtures/test/with-tests-module/modules/tests/template-values.json @@ -0,0 +1,7 @@ +{ + "name": "Pos Module Tests", + "machine_name": "tests", + "type": "module", + "version": "1.2.0", + "dependencies": {} +} diff --git a/test/fixtures/test/without-tests-module/app/pos-modules.json b/test/fixtures/test/without-tests-module/app/pos-modules.json new file mode 100644 index 00000000..297679bd --- /dev/null +++ b/test/fixtures/test/without-tests-module/app/pos-modules.json @@ -0,0 +1,3 @@ +{ + "modules": {} +} \ No newline at end of file diff --git a/test/test-run.test.js b/test/test-run.test.js new file mode 100644 index 00000000..be895405 --- /dev/null +++ b/test/test-run.test.js @@ -0,0 +1,87 @@ +/* global jest */ + +const exec = require('./utils/exec'); +const cliPath = require('./utils/cliPath'); + +require('dotenv').config(); + +const cwd = name => `${process.cwd()}/test/fixtures/test/${name}`; + +const run = (fixtureName, options) => exec(`${cliPath} test run ${options || ''}`, { cwd: cwd(fixtureName), env: process.env }); +const deploy = (fixtureName) => exec(`${cliPath} deploy staging`, { cwd: cwd(fixtureName), env: process.env }); + +jest.setTimeout(200000); // Test run can take a while due to log polling + +describe('Test run command', () => { + // Deploy the test fixtures before running any tests + beforeAll(async () => { + const { stdout, stderr } = await deploy('with-tests-module'); + if (!stdout.includes('Deploy succeeded')) { + console.error('Deploy failed:', stderr); + throw new Error('Failed to deploy test fixtures'); + } + }); + + test('displays instance URL when running tests', async () => { + const { stdout, stderr } = await run('with-tests-module', 'staging'); + + expect(stdout).toMatch(`Running tests on: ${process.env.MPKIT_URL}`); + }); + + // Note: This test requires a staging instance WITHOUT the tests module installed. + // Since integration tests require the tests module to be deployed, this test + // is skipped when run against the same instance. 
+ test.skip('shows error when tests module is not installed', async () => { + const { stderr } = await run('without-tests-module', 'staging'); + + expect(stderr).toMatch('Tests module not found'); + }); + + test('runs all tests and shows results when no test name provided', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging'); + + // Verify test execution started + expect(stdout).toMatch('Starting test run...'); + + // Verify test results are actually displayed (not just hanging) + // The output should include either test results summary or individual test status + const hasTestResults = stdout.includes('passed') || + stdout.includes('failed') || + stdout.includes('Test Results:') || + stdout.includes('total)'); + + expect(hasTestResults).toBe(true); + }); + + test('runs a single passing test by name and shows success', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging example_test'); + + // Verify test results are displayed + expect(stdout).toMatch('Test Results:'); + + // Verify the passing test is shown as passed (with checkmark) + expect(stdout).toMatch(/✓.*example_test/); + + // Verify summary shows 1 passed + expect(stdout).toMatch('1 passed'); + + // Exit code should be 0 for passing test + expect(code).toBe(0); + }); + + test('runs a single failing test by name and shows failure', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging failing_test'); + + // Verify test results are displayed + expect(stdout + stderr).toMatch('Test Results:'); + + // Verify the failing test is shown as failed (with X mark) + expect(stdout + stderr).toMatch(/✗.*failing_test/); + + // Verify summary shows 1 failed + expect(stdout + stderr).toMatch('1 failed'); + + // Exit code should be 1 for failing test + expect(code).toBe(1); + }); +}); \ No newline at end of file diff --git a/test/test.test.js b/test/test.test.js new file mode 100644 index 00000000..d2233be6 --- /dev/null +++ b/test/test.test.js @@ -0,0 +1,612 @@ +jest.mock('../lib/apiRequest', () => ({ + apiRequest: jest.fn() +})); + +require('dotenv').config(); + +const Gateway = require('../lib/proxy'); + +describe('Gateway test methods', () => { + const { apiRequest } = require('../lib/apiRequest'); + + beforeEach(() => { + apiRequest.mockReset(); + }); + + describe('test(name)', () => { + test('calls apiRequest with correct URL for single test (JS format)', async () => { + apiRequest.mockResolvedValue({ passed: 1, failed: 0, total: 1, tests: [] }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.test('example_test'); + + expect(apiRequest).toHaveBeenCalledWith({ + method: 'GET', + uri: 'http://example.com/_tests/run.js?name=example_test', + formData: undefined, + json: true, + forever: undefined, + request: expect.any(Function) + }); + expect(result).toEqual({ passed: 1, failed: 0, total: 1, tests: [] }); + }); + + test('handles test with path in name', async () => { + apiRequest.mockResolvedValue({ passed: 1, failed: 0, total: 1 }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + await gateway.test('test/examples/assertions_test'); + + expect(apiRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: 'http://example.com/_tests/run.js?name=test/examples/assertions_test' + })); + }); + }); + + describe('testRunAsync()', () => { + test('calls apiRequest with run_async endpoint (no 
.js extension for v1.1.0+)', async () => { + apiRequest.mockResolvedValue({ test_run_id: 'test-run-123' }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.testRunAsync(); + + expect(apiRequest).toHaveBeenCalledWith({ + method: 'GET', + uri: 'http://example.com/_tests/run_async', + formData: undefined, + json: true, + forever: undefined, + request: expect.any(Function) + }); + expect(result).toEqual({ test_run_id: 'test-run-123' }); + }); + + test('handles error response', async () => { + apiRequest.mockResolvedValue({ error: 'Tests module not found' }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.testRunAsync(); + + expect(result).toEqual({ error: 'Tests module not found' }); + }); + }); + + describe('formatTestLog', () => { + let formatTestLog; + + beforeEach(() => { + const testRunModule = require('../bin/pos-cli-test-run'); + formatTestLog = testRunModule.formatTestLog; + }); + + test('highlights test log with test path (new test indicator)', () => { + const logRow = { + message: '{"path": "app/lib/test/example_test.liquid"}', + type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + // Should contain the arrow indicator and path + expect(result).toContain('▶'); + expect(result).toContain('app/lib/test/example_test.liquid'); + }); + + test('displays test log without path normally', () => { + const logRow = { + message: 'Test assertion passed', + type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + // Should not contain the arrow indicator + expect(result).not.toContain('▶'); + expect(result).toContain('Test assertion passed'); + }); + + test('dims debug logs (type != test_name)', () => { + const logRow = { + message: 'Debug: checking variable value', + type: 'debug' + }; + + const result = formatTestLog(logRow, false); + + // Should contain the debug prefix with type + expect(result).toContain('[debug:debug]'); + expect(result).toContain('Debug: checking variable value'); + }); + + test('shows debug type in dimmed log output', () => { + const logRow = { + message: 'Custom debug message from test', + type: 'custom_debug' + }; + + const result = formatTestLog(logRow, false); + + expect(result).toContain('[debug:custom_debug]'); + expect(result).toContain('Custom debug message from test'); + }); + + test('highlights test path in modules directory', () => { + const logRow = { + message: '{"path": "modules/my_module/test/unit_test.liquid"}', + type: 'liquid_test_xyz789' + }; + + const result = formatTestLog(logRow, true); + + expect(result).toContain('▶'); + expect(result).toContain('modules/my_module/test/unit_test.liquid'); + }); + }); + + describe('TestLogStream', () => { + let TestLogStream; + + beforeEach(() => { + const testRunModule = require('../bin/pos-cli-test-run'); + TestLogStream = testRunModule.TestLogStream; + }); + + describe('parseJsonSummary', () => { + test('parses successful test completion JSON from tests module 1.1.1+', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total_tests: 5, + total_assertions: 16, + total_errors: 0, + duration_ms: 26, + test_run_id: 'test-run-123', + tests: [ + { name: "test/array_test", success: true, assertions: 2, errors: {} }, + { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, + { name: "test/example_test", 
success: true, assertions: 5, errors: {} }, + { name: "test/object_test", success: true, assertions: 3, errors: {} }, + { name: "test/string_test", success: true, assertions: 2, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 5, + passed: 5, + failed: 0, + assertions: 16, + tests: [ + { name: "test/array_test", status: "passed", passed: true, assertions: 2 }, + { name: "test/examples/assertions_test", status: "passed", passed: true, assertions: 4 }, + { name: "test/example_test", status: "passed", passed: true, assertions: 5 }, + { name: "test/object_test", status: "passed", passed: true, assertions: 3 }, + { name: "test/string_test", status: "passed", passed: true, assertions: 2 } + ], + duration: 26 + }); + }); + + test('parses failed test completion JSON with error details', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: false, + total_tests: 3, + total_assertions: 10, + total_errors: 1, + duration_ms: 45, + test_run_id: 'test-run-123', + tests: [ + { name: "test/passing_test", success: true, assertions: 3, errors: {} }, + { name: "test/failing_test", success: false, assertions: 2, errors: { expected: "field to be 2", actual: "field is 1" } }, + { name: "test/another_passing_test", success: true, assertions: 5, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 3, + passed: 2, + failed: 1, + assertions: 10, + tests: [ + { name: "test/passing_test", status: "passed", passed: true, assertions: 3 }, + { name: "test/failing_test", status: "failed", passed: false, assertions: 2, error: "{\"expected\":\"field to be 2\",\"actual\":\"field is 1\"}" }, + { name: "test/another_passing_test", status: "passed", passed: true, assertions: 5 } + ], + duration: 45 + }); + }); + + test('handles alternative field names (total instead of total_tests)', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total: 4, + assertions: 8, + duration: 30, + test_run_id: 'test-run-123', + tests: [ + { name: "test1", success: true, assertions: 2, errors: {} }, + { name: "test2", success: true, assertions: 2, errors: {} }, + { name: "test3", success: true, assertions: 2, errors: {} }, + { name: "test4", success: true, assertions: 2, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 4, + passed: 4, + failed: 0, + assertions: 8, + tests: [ + { name: "test1", status: "passed", passed: true, assertions: 2 }, + { name: "test2", status: "passed", passed: true, assertions: 2 }, + { name: "test3", status: "passed", passed: true, assertions: 2 }, + { name: "test4", status: "passed", passed: true, assertions: 2 } + ], + duration: 30 + }); + }); + + test('returns null for invalid JSON', () => { + const stream = new TestLogStream({}); + const invalidJson = '{ "invalid": json }'; + const result = stream.parseJsonSummary(invalidJson); + expect(result).toBeNull(); + }); + }); + + describe('isValidTestSummaryJson', () => { + test('identifies valid test summary JSON', () => { + const stream = new TestLogStream({}); + + const validMessage = JSON.stringify({ + success: true, + total_tests: 5, + total_assertions: 16, + duration_ms: 26, + test_run_id: 'test-run-123', + tests: [] + }); + + expect(stream.isValidTestSummaryJson(validMessage)).toBe(true); + }); + + test('rejects JSON without tests array', () => { + const stream = new TestLogStream({}); + 
+ const invalidMessage = JSON.stringify({ + success: true, + total_tests: 5, + duration_ms: 26 + }); + + expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); + }); + + test('rejects JSON without success field', () => { + const stream = new TestLogStream({}); + + const invalidMessage = JSON.stringify({ + total_tests: 5, + duration_ms: 26, + tests: [] + }); + + expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); + }); + + test('rejects non-test JSON', () => { + const stream = new TestLogStream({}); + + const invalidMessage = JSON.stringify({ + path: "test/array_test" + }); + + expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); + }); + }); + + describe('testCompleted event emission', () => { + test('emits testCompleted only once even when duplicate JSON summaries are received', () => { + const stream = new TestLogStream({}); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const testSummaryJson = JSON.stringify({ + success: true, + total_tests: 5, + total_assertions: 16, + duration_ms: 26, + test_run_id: 'test-run-123', + tests: [ + { name: "test/array_test", success: true, assertions: 2, errors: {} }, + { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, + { name: "test/example_test", success: true, assertions: 5, errors: {} }, + { name: "test/object_test", success: true, assertions: 3, errors: {} }, + { name: "test/string_test", success: true, assertions: 2, errors: {} } + ] + }); + + // Simulate receiving the same JSON summary multiple times + const logRow1 = { id: 1, message: testSummaryJson }; + const logRow2 = { id: 2, message: testSummaryJson }; + const logRow3 = { id: 3, message: testSummaryJson }; + + // Process first occurrence + stream.processLogMessage(logRow1); + // Process second occurrence (duplicate) + stream.processLogMessage(logRow2); + // Process third occurrence (duplicate) + stream.processLogMessage(logRow3); + + // Should emit testCompleted only once + expect(mockEmit).toHaveBeenCalledTimes(1); + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + + // Verify the emitted results are correct + const emittedResults = mockEmit.mock.calls[0][1]; + expect(emittedResults.total).toBe(5); + expect(emittedResults.passed).toBe(5); + expect(emittedResults.failed).toBe(0); + }); + + test('only processes JSON summaries that match the testRunId', () => { + const stream = new TestLogStream({}, 30000, 'test-run-123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const matchingSummaryJson = JSON.stringify({ + success: false, + total_tests: 2, + total_errors: 2, + test_run_id: 'test-run-123', + tests: [ + { name: "test1", success: false, assertions: 1, errors: { message: "failed" } }, + { name: "test2", success: false, assertions: 1, errors: { message: "failed" } } + ] + }); + + const nonMatchingSummaryJson = JSON.stringify({ + success: true, + total_tests: 2, + test_run_id: 'test-run-456', + tests: [ + { name: "test1", success: true, assertions: 1, errors: {} }, + { name: "test2", success: true, assertions: 1, errors: {} } + ] + }); + + // Process non-matching summary first (should be ignored) + stream.processLogMessage({ id: 1, message: nonMatchingSummaryJson }); + // Process matching summary (should be processed) + stream.processLogMessage({ id: 2, message: matchingSummaryJson }); + + // Should emit testCompleted only once for the matching summary + expect(mockEmit).toHaveBeenCalledTimes(1); + expect(mockEmit).toHaveBeenCalledWith('testCompleted', 
expect.any(Object)); + + // Verify the emitted results are from the matching (failing) summary + const emittedResults = mockEmit.mock.calls[0][1]; + expect(emittedResults.total).toBe(2); + expect(emittedResults.passed).toBe(0); + expect(emittedResults.failed).toBe(2); + }); + + test('ignores JSON summaries when no testRunId is set (backward compatibility)', () => { + const stream = new TestLogStream({}); // No testRunId + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const summaryWithIdJson = JSON.stringify({ + success: true, + total_tests: 1, + test_run_id: 'test-run-123', + tests: [{ name: "test1", success: true, assertions: 1, errors: {} }] + }); + + // Should still process summaries even with test_run_id when no filter is set + stream.processLogMessage({ id: 1, message: summaryWithIdJson }); + + expect(mockEmit).toHaveBeenCalledTimes(1); + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + }); + }); + + describe('testName filtering', () => { + test('detects test start with matching testName type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const startLog = { + id: 1, + message: 'Starting unit tests', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(startLog); + + expect(mockEmit).toHaveBeenCalledWith('testStarted'); + expect(stream.testStarted).toBe(true); + }); + + test('ignores test start with non-matching testName type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const startLog = { + id: 1, + message: 'Starting unit tests', + error_type: 'liquid_test_different' + }; + + stream.processLogMessage(startLog); + + expect(mockEmit).not.toHaveBeenCalled(); + expect(stream.testStarted).toBe(false); + }); + + test('detects test completion with testName SUMMARY type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; // Simulate test already started + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const summaryJson = JSON.stringify({ + success: true, + total_tests: 2, + tests: [ + { name: "test1", success: true, assertions: 1, errors: {} }, + { name: "test2", success: true, assertions: 1, errors: {} } + ] + }); + + const summaryLog = { + id: 2, + message: summaryJson, + error_type: 'liquid_test_abc123 SUMMARY' + }; + + stream.processLogMessage(summaryLog); + + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + expect(stream.completed).toBe(true); + }); + + test('ignores summary with non-matching testName SUMMARY type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const summaryJson = JSON.stringify({ + success: true, + total_tests: 2, + tests: [ + { name: "test1", success: true, assertions: 1, errors: {} }, + { name: "test2", success: true, assertions: 1, errors: {} } + ] + }); + + const summaryLog = { + id: 2, + message: summaryJson, + error_type: 'liquid_test_different SUMMARY' + }; + + stream.processLogMessage(summaryLog); + + // Should emit testLog instead since it's during the test run + expect(mockEmit).toHaveBeenCalledWith('testLog', expect.any(Object), false); + expect(stream.completed).toBe(false); + }); + + test('emits testLog with isTestLog=true for logs with matching testName type', () => { + const stream 
= new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const testLog = { + id: 2, + message: '{"path": "app/lib/test/example_test.liquid"}', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(testLog); + + expect(mockEmit).toHaveBeenCalledWith('testLog', testLog, true); + }); + + test('emits testLog with isTestLog=false for logs with different type (debug logs)', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const debugLog = { + id: 2, + message: 'Debug: checking variable value', + error_type: 'debug' + }; + + stream.processLogMessage(debugLog); + + expect(mockEmit).toHaveBeenCalledWith('testLog', debugLog, false); + }); + + test('does not emit logs before test started', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const earlyLog = { + id: 1, + message: 'Some early log from previous test run', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(earlyLog); + + expect(mockEmit).not.toHaveBeenCalled(); + }); + + test('does not emit logs after test completed', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + stream.completed = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const lateLog = { + id: 3, + message: 'Some late log', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(lateLog); + + expect(mockEmit).not.toHaveBeenCalled(); + }); + + test('filters noise from past test runs by only processing logs with matching testName', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_current'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + // Log from a past test run with different testName + const pastLog = { + id: 1, + message: 'Some past test log', + error_type: 'liquid_test_past' + }; + + // Log from current test run + const currentLog = { + id: 2, + message: 'Current test log', + error_type: 'liquid_test_current' + }; + + stream.processLogMessage(pastLog); + stream.processLogMessage(currentLog); + + // Should emit both logs, but with different isTestLog values + expect(mockEmit).toHaveBeenCalledTimes(2); + expect(mockEmit).toHaveBeenNthCalledWith(1, 'testLog', pastLog, false); // non-matching type = debug + expect(mockEmit).toHaveBeenNthCalledWith(2, 'testLog', currentLog, true); // matching type = test log + }); + }); + }); +}); \ No newline at end of file From d2224e8c86bd7dfd2350e3b12bd8e97cae74f174 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Thu, 15 Jan 2026 19:39:51 +0100 Subject: [PATCH 5/9] add support for -f command --- bin/pos-cli-exec-graphql.js | 23 +++++++++++++++++++--- bin/pos-cli-exec-liquid.js | 23 +++++++++++++++++++--- test/exec-graphql.test.js | 31 ++++++++++++++++++++++++++++++ test/exec-liquid.test.js | 31 ++++++++++++++++++++++++++++++ test/fixtures/test-graphql.graphql | 1 + test/fixtures/test-liquid.liquid | 1 + 6 files changed, 104 insertions(+), 6 deletions(-) create mode 100644 test/fixtures/test-graphql.graphql create mode 100644 test/fixtures/test-liquid.liquid diff --git a/bin/pos-cli-exec-graphql.js b/bin/pos-cli-exec-graphql.js index f0d84fc6..bd7a1434 100644 --- a/bin/pos-cli-exec-graphql.js +++ b/bin/pos-cli-exec-graphql.js @@ -1,5 
+1,6 @@ #!/usr/bin/env node +const fs = require('fs'); const { program } = require('commander'); const prompts = require('prompts'); const Gateway = require('../lib/proxy'); @@ -28,8 +29,24 @@ const confirmProductionExecution = async (environment) => { program .name('pos-cli exec graphql') .argument('', 'name of environment. Example: staging') - .argument('', 'graphql query to execute as string') - .action(async (environment, graphql) => { + .argument('[graphql]', 'graphql query to execute as string') + .option('-f, --file ', 'path to graphql file to execute') + .action(async (environment, graphql, options) => { + let query = graphql; + + if (options.file) { + if (!fs.existsSync(options.file)) { + logger.Error(`File not found: ${options.file}`); + process.exit(1); + } + query = fs.readFileSync(options.file, 'utf8'); + } + + if (!query) { + logger.Error("error: missing required argument 'graphql'"); + process.exit(1); + } + const authData = fetchAuthData(environment, program); const gateway = new Gateway(authData); @@ -42,7 +59,7 @@ program } try { - const response = await gateway.graph({ query: graphql }); + const response = await gateway.graph({ query }); if (response.errors) { logger.Error(`GraphQL execution error: ${JSON.stringify(response.errors, null, 2)}`); diff --git a/bin/pos-cli-exec-liquid.js b/bin/pos-cli-exec-liquid.js index 0947277f..a8a8000f 100644 --- a/bin/pos-cli-exec-liquid.js +++ b/bin/pos-cli-exec-liquid.js @@ -1,5 +1,6 @@ #!/usr/bin/env node +const fs = require('fs'); const { program } = require('commander'); const prompts = require('prompts'); const Gateway = require('../lib/proxy'); @@ -28,8 +29,24 @@ const confirmProductionExecution = async (environment) => { program .name('pos-cli exec liquid') .argument('', 'name of environment. 
Example: staging') - .argument('', 'liquid code to execute as string') - .action(async (environment, code) => { + .argument('[code]', 'liquid code to execute as string') + .option('-f, --file ', 'path to liquid file to execute') + .action(async (environment, code, options) => { + let liquidCode = code; + + if (options.file) { + if (!fs.existsSync(options.file)) { + logger.Error(`File not found: ${options.file}`); + process.exit(1); + } + liquidCode = fs.readFileSync(options.file, 'utf8'); + } + + if (!liquidCode) { + logger.Error("error: missing required argument 'code'"); + process.exit(1); + } + const authData = fetchAuthData(environment, program); const gateway = new Gateway(authData); @@ -42,7 +59,7 @@ program } try { - const response = await gateway.liquid({ content: code }); + const response = await gateway.liquid({ content: liquidCode }); if (response.error) { logger.Error(`Liquid execution error: ${response.error}`); diff --git a/test/exec-graphql.test.js b/test/exec-graphql.test.js index 2763a5a9..bba2f6b2 100644 --- a/test/exec-graphql.test.js +++ b/test/exec-graphql.test.js @@ -89,6 +89,37 @@ describe('exec graphql CLI', () => { expect(stdout).not.toMatch('WARNING: You are executing GraphQL on a production environment'); expect(stdout).not.toMatch('Execution cancelled.'); }); + + test('accepts --file flag and reads content from file', async () => { + const fixturePath = require('path').resolve(__dirname, 'fixtures/test-graphql.graphql'); + const { code, stdout, stderr } = await exec(`${cliPath} exec graphql staging --file "${fixturePath}"`, { env }); + + // Command will fail due to mock API but should not complain about missing graphql argument + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + }); + + test('accepts -f shorthand flag and reads content from file', async () => { + const fixturePath = require('path').resolve(__dirname, 'fixtures/test-graphql.graphql'); + const { code, stdout, stderr } = await exec(`${cliPath} exec graphql staging -f "${fixturePath}"`, { env }); + + // Command will fail due to mock API but should not complain about missing graphql argument + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + }); + + test('shows error when file does not exist', async () => { + const { code, stderr } = await exec(`${cliPath} exec graphql staging --file "/nonexistent/path/to/file.graphql"`, { env }); + + expect(code).toEqual(1); + expect(stderr).toMatch('File not found'); + expect(stderr).toMatch('/nonexistent/path/to/file.graphql'); + }); + + test('requires either graphql argument or --file option', async () => { + const { code, stderr } = await exec(`${cliPath} exec graphql staging`, { env }); + + expect(code).toEqual(1); + expect(stderr).toMatch("error: missing required argument 'graphql'"); + }); }); // Integration test - requires real platformOS instance diff --git a/test/exec-liquid.test.js b/test/exec-liquid.test.js index a8f64449..c3eb126a 100644 --- a/test/exec-liquid.test.js +++ b/test/exec-liquid.test.js @@ -72,6 +72,37 @@ describe('exec liquid CLI', () => { expect(stdout).not.toMatch('WARNING: You are executing liquid code on a production environment'); expect(stdout).not.toMatch('Execution cancelled.'); }); + + test('accepts --file flag and reads content from file', async () => { + const fixturePath = require('path').resolve(__dirname, 'fixtures/test-liquid.liquid'); + const { code, stdout, stderr } = await exec(`${cliPath} exec liquid staging --file "${fixturePath}"`, { env }); + + // Command will fail 
due to mock API but should not complain about missing code argument + expect(stderr).not.toMatch("error: missing required argument 'code'"); + }); + + test('accepts -f shorthand flag and reads content from file', async () => { + const fixturePath = require('path').resolve(__dirname, 'fixtures/test-liquid.liquid'); + const { code, stdout, stderr } = await exec(`${cliPath} exec liquid staging -f "${fixturePath}"`, { env }); + + // Command will fail due to mock API but should not complain about missing code argument + expect(stderr).not.toMatch("error: missing required argument 'code'"); + }); + + test('shows error when file does not exist', async () => { + const { code, stderr } = await exec(`${cliPath} exec liquid staging --file "/nonexistent/path/to/file.liquid"`, { env }); + + expect(code).toEqual(1); + expect(stderr).toMatch('File not found'); + expect(stderr).toMatch('/nonexistent/path/to/file.liquid'); + }); + + test('requires either code argument or --file option', async () => { + const { code, stderr } = await exec(`${cliPath} exec liquid staging`, { env }); + + expect(code).toEqual(1); + expect(stderr).toMatch("error: missing required argument 'code'"); + }); }); // Integration test - requires real platformOS instance diff --git a/test/fixtures/test-graphql.graphql b/test/fixtures/test-graphql.graphql new file mode 100644 index 00000000..2bb04d6a --- /dev/null +++ b/test/fixtures/test-graphql.graphql @@ -0,0 +1 @@ +{ records(per_page: 20) { results { id } } } diff --git a/test/fixtures/test-liquid.liquid b/test/fixtures/test-liquid.liquid new file mode 100644 index 00000000..bde0b260 --- /dev/null +++ b/test/fixtures/test-liquid.liquid @@ -0,0 +1 @@ +{{ 'hello world' | upcase }} From ac827cbbb3e93d212456fee6891ca864ee7b1c28 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Thu, 15 Jan 2026 19:43:37 +0100 Subject: [PATCH 6/9] add changelog / readme --- CHANGELOG.md | 6 + README.md | 54 ++++++ .../app/lib/test/passing_one_test.liquid | 4 + .../app/lib/test/passing_two_test.liquid | 4 + .../with-passing-tests/app/pos-modules.json | 5 + .../app/pos-modules.lock.json | 5 + .../app/views/pages/index.liquid | 4 + .../public/graphql/sent_mails/search.graphql | 20 +++ .../public/graphql/test_files/count.graphql | 12 ++ .../public/graphql/test_files/search.graphql | 15 ++ .../tests/public/lib/assertions/blank.liquid | 7 + .../tests/public/lib/assertions/equal.liquid | 9 + .../lib/assertions/invalid_object.liquid | 7 + .../public/lib/assertions/not_presence.liquid | 7 + .../public/lib/assertions/not_true.liquid | 10 ++ .../lib/assertions/not_valid_object.liquid | 7 + .../assertions/object_contains_object.liquid | 20 +++ .../public/lib/assertions/presence.liquid | 7 + .../tests/public/lib/assertions/true.liquid | 10 ++ .../public/lib/assertions/valid_object.liquid | 8 + .../tests/public/lib/commands/run.liquid | 46 +++++ .../public/lib/helpers/register_error.liquid | 27 +++ .../public/lib/queries/sent_mails/find.liquid | 8 + .../lib/queries/sent_mails/search.liquid | 4 + .../tests/public/translations/en/should.yml | 16 ++ .../public/views/layouts/mailer.html.liquid | 165 ++++++++++++++++++ .../tests/public/views/layouts/test.liquid | 154 ++++++++++++++++ .../views/pages/_tests/index.html.liquid | 10 ++ .../public/views/pages/_tests/index.js.liquid | 28 +++ .../public/views/pages/_tests/run.html.liquid | 11 ++ .../public/views/pages/_tests/run.js.liquid | 13 ++ .../views/pages/_tests/run_async.js.liquid | 13 ++ .../views/pages/_tests/run_async.liquid | 10 ++ 
.../pages/_tests/sent_mails/index.liquid | 11 ++ .../views/pages/_tests/sent_mails/show.liquid | 11 ++ .../views/partials/sent_mails/list.liquid | 20 +++ .../partials/sent_mails/pagination.liquid | 66 +++++++ .../views/partials/sent_mails/show.liquid | 8 + .../public/views/partials/tests/index.liquid | 18 ++ .../views/partials/tests/show_html.liquid | 25 +++ .../views/partials/tests/show_js.liquid | 30 ++++ .../views/partials/tests/show_log.liquid | 7 + .../views/partials/tests/show_log_js.liquid | 8 + .../views/partials/tests/show_text.liquid | 22 +++ .../partials/tests/test_report_html.liquid | 17 ++ .../partials/tests/test_report_text.liquid | 5 + .../modules/tests/template-values.json | 7 + test/test-run.test.js | 146 ++++++++++------ 48 files changed, 1075 insertions(+), 52 deletions(-) create mode 100644 test/fixtures/test/with-passing-tests/app/lib/test/passing_one_test.liquid create mode 100644 test/fixtures/test/with-passing-tests/app/lib/test/passing_two_test.liquid create mode 100644 test/fixtures/test/with-passing-tests/app/pos-modules.json create mode 100644 test/fixtures/test/with-passing-tests/app/pos-modules.lock.json create mode 100644 test/fixtures/test/with-passing-tests/app/views/pages/index.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/graphql/sent_mails/search.graphql create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/count.graphql create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/search.graphql create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/blank.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/equal.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/invalid_object.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_presence.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_true.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_valid_object.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/object_contains_object.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/presence.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/true.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/valid_object.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/commands/run.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/helpers/register_error.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/find.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/search.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/translations/en/should.yml create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/mailer.html.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/test.liquid create mode 100644 
test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.html.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.js.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.html.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.js.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.js.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/index.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/show.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/list.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/pagination.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/show.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/index.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_html.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_js.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log_js.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_text.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_html.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_text.liquid create mode 100644 test/fixtures/test/with-passing-tests/modules/tests/template-values.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d9bd37b..baa2aae1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## Unreleased + +* Feature: `pos-cli exec liquid` command to execute Liquid code directly on an instance (supports `-f` flag to load from file, requires confirmation on production) +* Feature: `pos-cli exec graphql` command to execute GraphQL queries directly on an instance (supports `-f` flag to load from file, requires confirmation on production) +* Feature: `pos-cli test run` command to run tests using the tests module + ## 5.5.0 * Feature: (GUI) Ability to add, edit and remove users diff --git a/README.md b/README.md index ddb9e007..8ad85d61 100644 --- a/README.md +++ b/README.md @@ -444,6 +444,60 @@ If you need guidance or additional information about how to use a specific gener pos-cli generate modules/core/generators/command --generator-help +### Executing Code + +#### Execute Liquid + +Execute Liquid code directly on your instance: + + pos-cli exec liquid [environment] [code] + +Example: + + pos-cli exec liquid staging "{{ 'hello' | upcase }}" + +You can also execute Liquid code from a file using the `-f` flag: + + pos-cli exec liquid staging -f path/to/script.liquid + +#### Execute GraphQL + 
+Execute GraphQL queries directly on your instance: + + pos-cli exec graphql [environment] [query] + +Example: + + pos-cli exec graphql staging "{ users(per_page: 5) { results { id email } } }" + +You can also execute GraphQL from a file using the `-f` flag: + + pos-cli exec graphql staging -f path/to/query.graphql + +**Note:** When executing on production environments (environment name contains "prod" or "production"), you will be prompted for confirmation before execution. + +### Running Tests + +To run tests on your instance, you need to have the [tests module](https://github.com/Platform-OS/pos-module-tests) installed. + +#### Run All Tests + + pos-cli test run [environment] + +Example: + + pos-cli test run staging + +This command runs all tests and streams the results in real-time, showing individual test outcomes and a summary at the end. + +#### Run a Single Test + + pos-cli test run [environment] [test-name] + +Example: + + pos-cli test run staging my_test + ## Development The `pos-cli gui serve` command uses a distinct build process for the GraphiQL interface located in the `gui/editor/graphql` directory. diff --git a/test/fixtures/test/with-passing-tests/app/lib/test/passing_one_test.liquid b/test/fixtures/test/with-passing-tests/app/lib/test/passing_one_test.liquid new file mode 100644 index 00000000..aa3d2f25 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/app/lib/test/passing_one_test.liquid @@ -0,0 +1,4 @@ +{% liquid + assign var = '{"field": 1 }' | parse_json + include 'modules/tests/assertions/equal', contract: contract, given: var.field, expected: 1, field_name: 'field' +%} diff --git a/test/fixtures/test/with-passing-tests/app/lib/test/passing_two_test.liquid b/test/fixtures/test/with-passing-tests/app/lib/test/passing_two_test.liquid new file mode 100644 index 00000000..72d527d6 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/app/lib/test/passing_two_test.liquid @@ -0,0 +1,4 @@ +{% liquid + assign var = '{"value": "hello" }' | parse_json + include 'modules/tests/assertions/equal', contract: contract, given: var.value, expected: 'hello', field_name: 'value' +%} diff --git a/test/fixtures/test/with-passing-tests/app/pos-modules.json b/test/fixtures/test/with-passing-tests/app/pos-modules.json new file mode 100644 index 00000000..d6ee86fe --- /dev/null +++ b/test/fixtures/test/with-passing-tests/app/pos-modules.json @@ -0,0 +1,5 @@ +{ + "modules": { + "tests": "1.2.0" + } +} diff --git a/test/fixtures/test/with-passing-tests/app/pos-modules.lock.json b/test/fixtures/test/with-passing-tests/app/pos-modules.lock.json new file mode 100644 index 00000000..d6ee86fe --- /dev/null +++ b/test/fixtures/test/with-passing-tests/app/pos-modules.lock.json @@ -0,0 +1,5 @@ +{ + "modules": { + "tests": "1.2.0" + } +} diff --git a/test/fixtures/test/with-passing-tests/app/views/pages/index.liquid b/test/fixtures/test/with-passing-tests/app/views/pages/index.liquid new file mode 100644 index 00000000..ddae8958 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/app/views/pages/index.liquid @@ -0,0 +1,4 @@ +--- +slug: test-fixture-index +--- +Test fixture placeholder page diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/sent_mails/search.graphql b/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/sent_mails/search.graphql new file mode 100644 index 00000000..7ea4a0b2 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/sent_mails/search.graphql @@ -0,0 +1,20 @@ +query mails($id: ID, 
$limit: Int = 20, $page: Int = 1) { + mails: admin_sent_notifications( + per_page: $limit + page: $page + filter: { id: { value: $id }, notification_type: { value: EMAIL } } + sort: { created_at: { order: DESC } } + ) { + total_entries + total_pages + current_page + has_previous_page + has_next_page + results { + id + created_at + content + options + } + } +} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/count.graphql b/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/count.graphql new file mode 100644 index 00000000..d507b054 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/count.graphql @@ -0,0 +1,12 @@ +query count_test_partials($path: String, $per_page: Int!){ + admin_liquid_partials( + per_page: $per_page + filter: { + path: { ends_with: "_test", contains: $path } + } + + ) { + total_entries + total_pages + } +} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/search.graphql b/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/search.graphql new file mode 100644 index 00000000..0b6bf71b --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/graphql/test_files/search.graphql @@ -0,0 +1,15 @@ +query test_partials($path: String, $per_page: Int = 100, $page: Int = 1){ + admin_liquid_partials( + per_page: $per_page + page: $page + filter: { + path: { ends_with: "_test", contains: $path } + } + + ) { + total_entries + results { + path + } + } +} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/blank.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/blank.liquid new file mode 100644 index 00000000..3a83aff5 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/blank.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + unless object[field_name] == blank + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.be_blank', message: null + endunless + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/equal.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/equal.liquid new file mode 100644 index 00000000..33a4401e --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/equal.liquid @@ -0,0 +1,9 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + if given != expected + assign msg = 'modules/tests/should.equal' | t: given: given, expected: expected + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, message: msg + endif + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/invalid_object.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/invalid_object.liquid new file mode 100644 index 00000000..ed73da97 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/invalid_object.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object.valid + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, message: object.errors + endif + return 
contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_presence.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_presence.liquid new file mode 100644 index 00000000..f9806eb6 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_presence.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object[field_name] != blank + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_blank' + endif + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_true.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_true.liquid new file mode 100644 index 00000000..61b055ac --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_true.liquid @@ -0,0 +1,10 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + assign value = value | default: object[field_name] + if value + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_true' + endif + + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_valid_object.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_valid_object.liquid new file mode 100644 index 00000000..5268eb4c --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/not_valid_object.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object.valid == true + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_valid' + endif + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/object_contains_object.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/object_contains_object.liquid new file mode 100644 index 00000000..15eec117 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/object_contains_object.liquid @@ -0,0 +1,20 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + for property in object_contains + assign key = property[0] + assign value = property[1] + + if given[key] == blank + assign message = 'modules/tests/should.have_key' | t: field_name: field_name + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: key, message: message + else + if given[key] != value + assign message = 'modules/tests/should.have_key_with_value' | t: value: value + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: key, message: message + endif + endif + endfor + + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/presence.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/presence.liquid new file mode 100644 index 00000000..10f1ab7e --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/presence.liquid @@ -0,0 +1,7 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if 
object[field_name] == blank + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.not.be_blank' + endif + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/true.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/true.liquid new file mode 100644 index 00000000..daac6d3b --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/true.liquid @@ -0,0 +1,10 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + + assign value = value | default: object[field_name] + unless value + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, key: 'modules/tests/should.be_true' + endunless + + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/valid_object.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/valid_object.liquid new file mode 100644 index 00000000..a4c44f2b --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/assertions/valid_object.liquid @@ -0,0 +1,8 @@ +{% liquid + hash_assign contract['total'] = contract['total'] | plus: 1 + if object.valid != true + assign message = 'should be valid: ' | append: object.errors + function contract = 'modules/tests/helpers/register_error', contract: contract, field_name: field_name, message: message + endif + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/commands/run.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/commands/run.liquid new file mode 100644 index 00000000..87930c7d --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/commands/run.liquid @@ -0,0 +1,46 @@ +{% liquid + assign ctx = context + hash_assign ctx['tests'] = true + log 'Starting unit tests', type: test_name + assign __start = "now" | to_time + assign per_page = 100 + graphql total_pages = 'modules/tests/test_files/count', per_page: per_page, path: context.params.name | dig: "admin_liquid_partials" | dig: "total_pages" + + if tests.size == 0 + unless format == 'js' + echo 'no tests found' + endunless + endif + assign total_errors = 0 + assign contracts = '' | split: ',' + + for page in (1..total_pages) + graphql tests = 'modules/tests/test_files/search', path: context.params.name, page: page, per_page: per_page | dig: "admin_liquid_partials" | dig: "results" + for test in tests + log test, type: test_name + assign contract = '{ "errors": {}, "success": true, "total": 0 }' | parse_json + + # platformos-check-disable ConvertIncludeToRender + include test.path, registry: test.path, contract: contract + # platformos-check-enable ConvertIncludeToRender + hash_assign contract['test_path'] = test.path + assign contracts = contracts | add_to_array: contract + assign total_errors = total_errors | plus: contract.errors.size + endfor + endfor + assign __stop = "now" | to_time + assign total_duration = __start | time_diff: __stop, 'ms' | round + + assign data = '{}' | parse_json + hash_assign data['contracts'] = contracts + hash_assign data['total_errors'] = total_errors + hash_assign data['total_duration'] = total_duration + + assign test_formatter = format | default: 'html' | prepend: 'modules/tests/tests/show_' + # platformos-check-disable ConvertIncludeToRender + include test_formatter, contracts: 
contracts, total_errors: total_errors, total_duration: total_duration, test_name: test_name + # platformos-check-enable ConvertIncludeToRender + if total_errors > 0 + response_status 500 + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/helpers/register_error.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/helpers/register_error.liquid new file mode 100644 index 00000000..11dfa1d1 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/helpers/register_error.liquid @@ -0,0 +1,27 @@ +{% comment %} + @params + contract - { errors: {}, success: true } + field_name + message: + key: i18n to be resolved into message +{% endcomment %} + +{% liquid + assign key = key | default: null + assign message = message | default: null + if key + assign msg = key | t + else + assign msg = message + endif + + assign errors = contract.errors + + assign field_erorrs = errors[field_name] | default: '[]' | parse_json + assign field_erorrs = field_erorrs | array_add: msg + + hash_assign errors[field_name] = field_erorrs + hash_assign contract['success'] = false + + return contract +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/find.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/find.liquid new file mode 100644 index 00000000..4ab4a057 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/find.liquid @@ -0,0 +1,8 @@ +{% liquid + if id == blank + return null + endif + + graphql r = 'modules/tests/sent_mails/search', id: id, limit: 1 + return r.mails.results.first +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/search.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/search.liquid new file mode 100644 index 00000000..066ec067 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/lib/queries/sent_mails/search.liquid @@ -0,0 +1,4 @@ +{% liquid + graphql r = 'modules/tests/sent_mails/search', limit: limit, page: page + return r.mails +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/translations/en/should.yml b/test/fixtures/test/with-passing-tests/modules/tests/public/translations/en/should.yml new file mode 100644 index 00000000..6f40d6fc --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/translations/en/should.yml @@ -0,0 +1,16 @@ +en: + should: + be_false: should be false + be_valid: should be valid + equal: expected %{given} to equal %{expected} + equal_not_verbose: does not match + have_key: key should exist in "%{field_name}" + have_key_with_value: should have value "%{value}" + match: match + be_blank: should be blank + be_true: should be true + not: + be_empty: should not be empty + be_blank: should not be blank + be_valid: should not be valid + be_true: should not be true diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/mailer.html.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/mailer.html.liquid new file mode 100644 index 00000000..c33042aa --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/mailer.html.liquid @@ -0,0 +1,165 @@ + + + + + + + + + + + {% liquid + assign url = 'https://' | append: context.location.host + %} + +
+

+ +
+ {{ 'app.title' | t: default: 'App' }} +
+

+ + {{ content_for_layout }} + + +
+ + + diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/test.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/test.liquid new file mode 100644 index 00000000..6b57c725 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/layouts/test.liquid @@ -0,0 +1,154 @@ + + + + + + + +
+
+ {{ content_for_layout }} +
+
+ + diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.html.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.html.liquid new file mode 100644 index 00000000..5db7ce85 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.html.liquid @@ -0,0 +1,10 @@ +--- +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + graphql tests = 'modules/tests/test_files/search', path: context.params.name | dig: "admin_liquid_partials" | dig: "results" + + render 'modules/tests/tests/index', tests: tests + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.js.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.js.liquid new file mode 100644 index 00000000..07256730 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/index.js.liquid @@ -0,0 +1,28 @@ +--- +layout: '' +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign per_page = 100 + graphql total_pages = 'modules/tests/test_files/count', per_page: per_page, path: context.params.name | dig: "admin_liquid_partials" | dig: "total_pages" + + assign result = '[]' | parse_json + + for page in (1..total_pages) + graphql tests = 'modules/tests/test_files/search', path: context.params.name, page: page, per_page: per_page | dig: "admin_liquid_partials" | dig: "results" + + for test in tests + assign test_name = test.path | remove_first: 'lib/test/' | remove_first: '_test' + assign test_url = '/_tests/run.js?test_name=' | append: test_name + assign test_object = '{}' | parse_json + hash_assign test_object['name'] = test_name + hash_assign test_object['url'] = test_url + assign result = result | add_to_array: test_object + endfor + endfor + + echo result | json + else + echo '{"error":"Tests can only be accessed in staging or development environment"}' + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.html.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.html.liquid new file mode 100644 index 00000000..78ba8fb2 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.html.liquid @@ -0,0 +1,11 @@ +--- +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign test_name = 5 | random_string | prepend: "liquid_test_" + # platformos-check-disable ConvertIncludeToRender + include 'modules/tests/commands/run', format: context.params.formatter, test_name: test_name + # platformos-check-enable ConvertIncludeToRender + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.js.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.js.liquid new file mode 100644 index 00000000..d36090cc --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run.js.liquid @@ -0,0 +1,13 @@ +--- +layout: '' +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign test_name = 5 | random_string | prepend: "liquid_test_" + # platformos-check-disable ConvertIncludeToRender + include 'modules/tests/commands/run', format: 'js', 
test_name: test_name + # platformos-check-enable ConvertIncludeToRender + else + echo '{"success":false,"error":"Tests can only be run in staging or development environment"}' + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.js.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.js.liquid new file mode 100644 index 00000000..79956bd9 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.js.liquid @@ -0,0 +1,13 @@ +--- +layout: '' +--- +{% if context.environment == 'staging' or context.environment == 'development' %} + {% assign test_name = 5 | random_string | prepend: "liquid_test_" %} + {% background source_name: "liquid_tests", test_name: test_name %} + {% include 'modules/tests/commands/run', format: 'log_js', test_name: test_name %} + {% endbackground %} + {% assign result = '{}' | parse_json | hash_merge: test_name: test_name %} + {{ result }} +{% else %} + {"success":false,"error":"Tests can only be run in staging or development environment"} +{% endif %} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.liquid new file mode 100644 index 00000000..7cb08dfb --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/run_async.liquid @@ -0,0 +1,10 @@ +--- +layout: '' +--- +{% if context.environment == 'staging' %} + {% assign test_name = 5 | random_string | prepend: "liquid_test_" %} + {% background source_name: "liquid_tests", test_name: test_name %} + {% include 'modules/tests/commands/run', format: 'log', test_name: test_name %} + {% endbackground %} + {{ test_name }} +{% endif %} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/index.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/index.liquid new file mode 100644 index 00000000..2e5885b2 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/index.liquid @@ -0,0 +1,11 @@ +--- +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + assign page = context.params.page | to_positive_integer: 1 + function mails = 'modules/tests/queries/sent_mails/search', limit: 20, page: page + + render 'modules/tests/sent_mails/list', mails: mails + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/show.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/show.liquid new file mode 100644 index 00000000..5c612fe9 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/pages/_tests/sent_mails/show.liquid @@ -0,0 +1,11 @@ +--- +slug: _tests/sent_mails/:id +layout: modules/tests/test +--- +{% liquid + if context.environment == 'staging' or context.environment == 'development' + function mail = 'modules/tests/queries/sent_mails/find', id: context.params.id + + render 'modules/tests/sent_mails/show', mail: mail + endif +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/list.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/list.liquid new file mode 100644 index 
00000000..eb8e2452 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/list.liquid @@ -0,0 +1,20 @@ +

Sent mails

+
+
+
Subject
+
To
+
Sent at
+
+
+
+ {% for mail in mails.results %} +
    +
  • {{ mail.options.subject }}
  • +
  • {{ mail.options.to | join: ',' }}
  • +
  • {{ mail.created_at | l }}
  • +
  • Show
  • +
+ {% endfor %} +
+
+ {% render 'modules/tests/sent_mails/pagination', collection: mails %} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/pagination.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/pagination.liquid new file mode 100644 index 00000000..1ecf01a8 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/pagination.liquid @@ -0,0 +1,66 @@ +{% comment %} + Required params: + collection: collection + current_page: integer + Optional params: + button_attrs: string + container_class: string +{% endcomment %} +{% liquid + assign container_class = container_class | default: "subtitle flex justify-center md:justify-end items-center mt-8 mx-auto md:mr-0 md:ms-auto" + assign button_attrs = button_attrs | default: '' | html_safe + assign current_page = collection.current_page | to_positive_integer: 1 + assign page_name = page_name | default: 'page' +%} + +{% if collection.has_previous_page or collection.has_next_page %} +
+ +
+ {% if collection.has_previous_page %} + + {% endif %} + + {% liquid + assign range_low = current_page | minus: 2 | at_least: 1 + assign range_high = range_low | plus: 4 | at_most: collection.total_pages + %} + {% for page_num in (range_low..range_high) %} + {% if page_num == current_page %} + {{ page_num }} + {% else %} + + {% endif %} + {% endfor %} + + {% if collection.has_next_page %} + + {% endif %} +
+
+{% endif %} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/show.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/show.liquid new file mode 100644 index 00000000..2fad3804 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/sent_mails/show.liquid @@ -0,0 +1,8 @@ +Back +

Sent mail

+

Subject: {{ mail.options.subject }}

+

To: {{ mail.options.to | join: ',' }}

+

Sent at: {{ mail.created_at | l }}

+
+ +{% print mail.content %} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/index.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/index.liquid new file mode 100644 index 00000000..424cd724 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/index.liquid @@ -0,0 +1,18 @@ + +
+
+
Test
+
+
+
+ {% for test in tests %} +
    + {% assign test_name = test.path | split: 'test/' | last %} +
  • {{ test.path }}
  • +
  • Run
  • +
+ {% endfor %} +
diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_html.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_html.liquid new file mode 100644 index 00000000..c2ce5684 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_html.liquid @@ -0,0 +1,25 @@ +
+{% assign total = 0 %} +{% liquid + for contract in contracts + render 'modules/tests/tests/test_report_html', name: contract.test_path, contract: contract + assign total = total | plus: contract.total + endfor +%} + +{% if total_errors > 0 %} +

Total errors: {{ total_errors }}

+ {% response_status 500 %} +{% endif %} + +
+ +

+ {% if total_errors > 0 %} + Failure. + {% else %} + Success. + {% endif %} +Assertions: {{ total }}. Failed: {{ total_errors }}. Time: {{ total_duration }}ms +

+
diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_js.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_js.liquid new file mode 100644 index 00000000..f9e4d755 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_js.liquid @@ -0,0 +1,30 @@ +{% liquid + assign result = '{}' | parse_json + assign total_assertions = 0 + + assign tests_array = '[]' | parse_json + for contract in contracts + assign total_assertions = total_assertions | plus: contract.total + + assign test_result = '{}' | parse_json + hash_assign test_result['name'] = contract.test_path + hash_assign test_result['success'] = contract.success + hash_assign test_result['assertions'] = contract.total + hash_assign test_result['errors'] = contract.errors + + assign tests_array = tests_array | add_to_array: test_result + endfor + + if total_errors > 0 + hash_assign result['success'] = false + else + hash_assign result['success'] = true + endif + + hash_assign result['total_tests'] = contracts.size + hash_assign result['total_assertions'] = total_assertions + hash_assign result['total_errors'] = total_errors + hash_assign result['duration_ms'] = total_duration + hash_assign result['tests'] = tests_array +%} +{{ result | json }} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log.liquid new file mode 100644 index 00000000..9abe2cc7 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log.liquid @@ -0,0 +1,7 @@ +{% capture result %} + {% render 'modules/tests/tests/show_text', contracts: contracts, total_errors: total_errors, total_duration: total_duration, test_name: test_name %} +{% endcapture %} +{% liquid + assign log_type = test_name | append: ' SUMMARY' + log result, type: log_type +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log_js.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log_js.liquid new file mode 100644 index 00000000..ffd51c6b --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_log_js.liquid @@ -0,0 +1,8 @@ +{% capture result %} + {% render 'modules/tests/tests/show_js', contracts: contracts, total_errors: total_errors, total_duration: total_duration, test_name: test_name %} +{% endcapture %} +{% assign result = result | html_safe %} +{% liquid + assign log_type = test_name | append: ' SUMMARY' + log result, type: log_type +%} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_text.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_text.liquid new file mode 100644 index 00000000..3108c53b --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/show_text.liquid @@ -0,0 +1,22 @@ +Liquid tests +------------------------ +{% liquid + for contract in contracts + render 'modules/tests/tests/test_report_text', name: contract.test_path, contract: contract + assign total = total | plus: contract.total + endfor +%} +------------------------ +{% liquid + if total_errors > 0 + assign result = 'Failed' + else + assign result = 'Success' + endif +%} +{{ result }}_{{ test_name | strip }} +{% if total_errors > 0 %} 
+ Total errors: {{ total_errors }} +{% endif %} + +Assertions: {{ total }}. Failed: {{ total_errors }}. Time: {{ total_duration }}ms diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_html.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_html.liquid new file mode 100644 index 00000000..2ff1a431 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_html.liquid @@ -0,0 +1,17 @@ +
+
+ {% assign test_name = name | replace: 'test/', '' %} + {{ test_name }} + + (run test) + +
+
+ {% for e in contract.errors %} +
+
{{ e[0] }}
+
{{ e[1] | join: ",
" | html_safe }}
+
+ {% endfor %} +
+
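Editorial aside, not part of the patch: the report partials in this fixture set (show_html, show_js, show_text and the test_report_* partials above) all consume the same contract structure that commands/run.liquid initialises and helpers/register_error.liquid fills in. A rough JavaScript-shaped illustration of one failed contract, inferred from those fixtures (the path and message values are examples only):

    // Shape inferred from run.liquid ('{ "errors": {}, "success": true, "total": 0 }')
    // and register_error.liquid (errors[field_name] collects an array of messages).
    const contract = {
      test_path: 'lib/test/passing_one_test', // added by run.liquid after the include
      success: false,                         // flipped to false on the first registered error
      total: 2,                               // incremented once per executed assertion
      errors: {
        field: ['expected 2 to equal 1']      // one message array per field_name
      }
    };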
diff --git a/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_text.liquid b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_text.liquid new file mode 100644 index 00000000..29a84503 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/public/views/partials/tests/test_report_text.liquid @@ -0,0 +1,5 @@ +{% assign test_name = name | replace: 'test/', '' %} +{{ test_name }} +{% for e in contract.errors %} + {{ e[0] }} {{ e[1] | join: ", " }} +{% endfor %} diff --git a/test/fixtures/test/with-passing-tests/modules/tests/template-values.json b/test/fixtures/test/with-passing-tests/modules/tests/template-values.json new file mode 100644 index 00000000..59e310f0 --- /dev/null +++ b/test/fixtures/test/with-passing-tests/modules/tests/template-values.json @@ -0,0 +1,7 @@ +{ + "name": "Pos Module Tests", + "machine_name": "tests", + "type": "module", + "version": "1.2.0", + "dependencies": {} +} diff --git a/test/test-run.test.js b/test/test-run.test.js index be895405..93a9a3ff 100644 --- a/test/test-run.test.js +++ b/test/test-run.test.js @@ -13,75 +13,117 @@ const deploy = (fixtureName) => exec(`${cliPath} deploy staging`, { cwd: cwd(fix jest.setTimeout(200000); // Test run can take a while due to log polling describe('Test run command', () => { - // Deploy the test fixtures before running any tests - beforeAll(async () => { - const { stdout, stderr } = await deploy('with-tests-module'); - if (!stdout.includes('Deploy succeeded')) { - console.error('Deploy failed:', stderr); - throw new Error('Failed to deploy test fixtures'); - } - }); + describe('with mixed tests (passing and failing)', () => { + // Deploy the test fixtures before running any tests + beforeAll(async () => { + const { stdout, stderr } = await deploy('with-tests-module'); + if (!stdout.includes('Deploy succeeded')) { + console.error('Deploy failed:', stderr); + throw new Error('Failed to deploy test fixtures'); + } + }); - test('displays instance URL when running tests', async () => { - const { stdout, stderr } = await run('with-tests-module', 'staging'); + test('displays instance URL when running tests', async () => { + const { stdout, stderr } = await run('with-tests-module', 'staging'); - expect(stdout).toMatch(`Running tests on: ${process.env.MPKIT_URL}`); - }); + expect(stdout).toMatch(`Running tests on: ${process.env.MPKIT_URL}`); + }); - // Note: This test requires a staging instance WITHOUT the tests module installed. - // Since integration tests require the tests module to be deployed, this test - // is skipped when run against the same instance. - test.skip('shows error when tests module is not installed', async () => { - const { stderr } = await run('without-tests-module', 'staging'); + // Note: This test requires a staging instance WITHOUT the tests module installed. + // Since integration tests require the tests module to be deployed, this test + // is skipped when run against the same instance. 
+ test.skip('shows error when tests module is not installed', async () => { + const { stderr } = await run('without-tests-module', 'staging'); - expect(stderr).toMatch('Tests module not found'); - }); + expect(stderr).toMatch('Tests module not found'); + }); - test('runs all tests and shows results when no test name provided', async () => { - const { stdout, stderr, code } = await run('with-tests-module', 'staging'); + test('runs all tests and shows results when no test name provided', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging'); - // Verify test execution started - expect(stdout).toMatch('Starting test run...'); + // Verify test execution started + expect(stdout).toMatch('Starting test run...'); - // Verify test results are actually displayed (not just hanging) - // The output should include either test results summary or individual test status - const hasTestResults = stdout.includes('passed') || - stdout.includes('failed') || - stdout.includes('Test Results:') || - stdout.includes('total)'); + // Verify test results are actually displayed (not just hanging) + // The output should include either test results summary or individual test status + const hasTestResults = stdout.includes('passed') || + stdout.includes('failed') || + stdout.includes('Test Results:') || + stdout.includes('total)'); - expect(hasTestResults).toBe(true); - }); + expect(hasTestResults).toBe(true); + }); + + test('runs a single passing test by name and shows success', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging example_test'); + + // Verify test results are displayed + expect(stdout).toMatch('Test Results:'); + + // Verify the passing test is shown as passed (with checkmark) + expect(stdout).toMatch(/✓.*example_test/); - test('runs a single passing test by name and shows success', async () => { - const { stdout, stderr, code } = await run('with-tests-module', 'staging example_test'); + // Verify summary shows 1 passed + expect(stdout).toMatch('1 passed'); - // Verify test results are displayed - expect(stdout).toMatch('Test Results:'); + // Exit code should be 0 for passing test + expect(code).toBe(0); + }); - // Verify the passing test is shown as passed (with checkmark) - expect(stdout).toMatch(/✓.*example_test/); + test('runs a single failing test by name and shows failure', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging failing_test'); - // Verify summary shows 1 passed - expect(stdout).toMatch('1 passed'); + // Verify test results are displayed + expect(stdout + stderr).toMatch('Test Results:'); - // Exit code should be 0 for passing test - expect(code).toBe(0); + // Verify the failing test is shown as failed (with X mark) + expect(stdout + stderr).toMatch(/✗.*failing_test/); + + // Verify summary shows 1 failed + expect(stdout + stderr).toMatch('1 failed'); + + // Exit code should be 1 for failing test + expect(code).toBe(1); + }); + + test('exits with code 1 when running all tests and at least one fails', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging'); + + // Verify we ran multiple tests (the fixture has passing and failing tests) + const output = stdout + stderr; + expect(output).toMatch(/\d+ passed/); + expect(output).toMatch(/\d+ failed/); + + // Exit code should be 1 when at least one test fails + expect(code).toBe(1); + }); }); - test('runs a single failing test by name and shows failure', async () => { - const { stdout, stderr, 
code } = await run('with-tests-module', 'staging failing_test'); + describe('with only passing tests', () => { + // Deploy the test fixtures with only passing tests + beforeAll(async () => { + const { stdout, stderr } = await deploy('with-passing-tests'); + if (!stdout.includes('Deploy succeeded')) { + console.error('Deploy failed:', stderr); + throw new Error('Failed to deploy test fixtures'); + } + }); + + test('exits with code 0 when running all tests and all pass', async () => { + const { stdout, stderr, code } = await run('with-passing-tests', 'staging'); - // Verify test results are displayed - expect(stdout + stderr).toMatch('Test Results:'); + // Verify test execution started + expect(stdout).toMatch('Starting test run...'); - // Verify the failing test is shown as failed (with X mark) - expect(stdout + stderr).toMatch(/✗.*failing_test/); + // Verify we have passing tests + const output = stdout + stderr; + expect(output).toMatch(/\d+ passed/); - // Verify summary shows 1 failed - expect(stdout + stderr).toMatch('1 failed'); + // Verify no tests failed + expect(output).not.toMatch(/[1-9]\d* failed/); - // Exit code should be 1 for failing test - expect(code).toBe(1); + // Exit code should be 0 when all tests pass + expect(code).toBe(0); + }); }); -}); \ No newline at end of file +}); From daf1d66099779877ba690ad56044d91aed5c9914 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Fri, 16 Jan 2026 12:59:14 +0100 Subject: [PATCH 7/9] refactor, improve tests --- bin/pos-cli-exec-graphql.js | 21 +- bin/pos-cli-exec-liquid.js | 21 +- bin/pos-cli-test-run.js | 9 +- bin/pos-cli-test.js | 1 - lib/productionEnvironment.js | 43 ++ test/exec-graphql.test.js | 196 ++++-- test/exec-liquid.test.js | 208 +++++-- test/productionEnvironment.test.js | 157 +++++ test/test-run.test.js | 917 ++++++++++++++++++++++++++--- test/test.test.js | 612 ------------------- 10 files changed, 1356 insertions(+), 829 deletions(-) create mode 100644 lib/productionEnvironment.js create mode 100644 test/productionEnvironment.test.js delete mode 100644 test/test.test.js diff --git a/bin/pos-cli-exec-graphql.js b/bin/pos-cli-exec-graphql.js index bd7a1434..76ae12ef 100644 --- a/bin/pos-cli-exec-graphql.js +++ b/bin/pos-cli-exec-graphql.js @@ -2,29 +2,10 @@ const fs = require('fs'); const { program } = require('commander'); -const prompts = require('prompts'); const Gateway = require('../lib/proxy'); const fetchAuthData = require('../lib/settings').fetchSettings; const logger = require('../lib/logger'); - -const isProductionEnvironment = (environment) => { - return environment && (environment.toLowerCase().includes('prod') || environment.toLowerCase().includes('production')); -}; - -const confirmProductionExecution = async (environment) => { - logger.Warn(`WARNING: You are executing GraphQL on a production environment: ${environment}`); - logger.Warn('This could potentially modify production data or cause unintended side effects.'); - logger.Warn(''); - - const response = await prompts({ - type: 'confirm', - name: 'confirmed', - message: `Are you sure you want to continue executing on ${environment}?`, - initial: false - }); - - return response.confirmed; -}; +const { isProductionEnvironment, confirmProductionExecution } = require('../lib/productionEnvironment'); program .name('pos-cli exec graphql') diff --git a/bin/pos-cli-exec-liquid.js b/bin/pos-cli-exec-liquid.js index a8a8000f..0db2d419 100644 --- a/bin/pos-cli-exec-liquid.js +++ b/bin/pos-cli-exec-liquid.js @@ -2,29 +2,10 @@ const fs = require('fs'); 
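// Editorial note, not part of the diff: this hunk mirrors the pos-cli-exec-graphql.js change
// above -- both commands drop their duplicated production-environment helpers and require the
// shared implementation from lib/productionEnvironment.js, which is added later in this patch.
// A rough sketch of how the helpers are presumably wired inside a command's action handler
// (the handler body is not shown in this hunk, so logger.Info and the exact control flow are
// assumptions; the CLI tests only assert on the 'Execution cancelled.' output and exit code 0):
//
//   if (isProductionEnvironment(environment)) {
//     const confirmed = await confirmProductionExecution(environment);
//     if (!confirmed) {
//       logger.Info('Execution cancelled.');
//       process.exit(0);
//     }
//   }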
const { program } = require('commander'); -const prompts = require('prompts'); const Gateway = require('../lib/proxy'); const fetchAuthData = require('../lib/settings').fetchSettings; const logger = require('../lib/logger'); - -const isProductionEnvironment = (environment) => { - return environment && (environment.toLowerCase().includes('prod') || environment.toLowerCase().includes('production')); -}; - -const confirmProductionExecution = async (environment) => { - logger.Warn(`WARNING: You are executing liquid code on a production environment: ${environment}`); - logger.Warn('This could potentially modify production data or cause unintended side effects.'); - logger.Warn(''); - - const response = await prompts({ - type: 'confirm', - name: 'confirmed', - message: `Are you sure you want to continue executing on ${environment}?`, - initial: false - }); - - return response.confirmed; -}; +const { isProductionEnvironment, confirmProductionExecution } = require('../lib/productionEnvironment'); program .name('pos-cli exec liquid') diff --git a/bin/pos-cli-test-run.js b/bin/pos-cli-test-run.js index 23efe39f..1a2e0cb8 100755 --- a/bin/pos-cli-test-run.js +++ b/bin/pos-cli-test-run.js @@ -201,11 +201,12 @@ class TestLogStream extends EventEmitter { }; // Handle errors - could be object with error details or array + // Check for array first since arrays are also objects in JavaScript if (test.errors) { - if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { - testItem.error = JSON.stringify(test.errors); - } else if (Array.isArray(test.errors) && test.errors.length > 0) { + if (Array.isArray(test.errors) && test.errors.length > 0) { testItem.errors = test.errors; + } else if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { + testItem.error = JSON.stringify(test.errors); } } @@ -384,7 +385,7 @@ const runSingleTest = async (gateway, name) => { const formatTestLog = (logRow, isTestLog) => { const message = logRow.message || ''; - const logType = logRow.type || ''; + const logType = logRow.error_type || ''; const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); const cleanMessage = fullMessage.replace(/\n$/, ''); diff --git a/bin/pos-cli-test.js b/bin/pos-cli-test.js index 19a51cd8..89c4e846 100755 --- a/bin/pos-cli-test.js +++ b/bin/pos-cli-test.js @@ -6,5 +6,4 @@ program.showHelpAfterError(); program .name('pos-cli test') .command('run [name]', 'run tests on instance (all tests if name not provided)') - .command('list ', 'list available tests on instance') .parse(process.argv); diff --git a/lib/productionEnvironment.js b/lib/productionEnvironment.js new file mode 100644 index 00000000..2ff26370 --- /dev/null +++ b/lib/productionEnvironment.js @@ -0,0 +1,43 @@ +const prompts = require('prompts'); +const logger = require('./logger'); + +/** + * Checks if an environment name indicates a production environment. + * Matches environment names containing 'prod' or 'production' (case-insensitive). + * + * @param {string} environment - The environment name to check + * @returns {boolean} True if the environment is considered production + */ +const isProductionEnvironment = (environment) => { + if (!environment || typeof environment !== 'string') { + return false; + } + const lowerEnv = environment.toLowerCase(); + return lowerEnv.includes('prod') +}; + +/** + * Prompts the user to confirm execution on a production environment. 
+ * + * @param {string} environment - The environment name + * @returns {Promise} True if the user confirmed, false otherwise + */ +const confirmProductionExecution = async (environment) => { + logger.Warn(`WARNING: You are executing on a production environment: ${environment}`); + logger.Warn('This could potentially modify production data or cause unintended side effects.'); + logger.Warn(''); + + const response = await prompts({ + type: 'confirm', + name: 'confirmed', + message: `Are you sure you want to continue executing on ${environment}?`, + initial: false + }); + + return response.confirmed; +}; + +module.exports = { + isProductionEnvironment, + confirmProductionExecution +}; diff --git a/test/exec-graphql.test.js b/test/exec-graphql.test.js index bba2f6b2..0178b4d9 100644 --- a/test/exec-graphql.test.js +++ b/test/exec-graphql.test.js @@ -7,6 +7,10 @@ const Gateway = require('../lib/proxy'); describe('Gateway graph method', () => { const { apiRequest } = require('../lib/apiRequest'); + beforeEach(() => { + jest.clearAllMocks(); + }); + test('calls apiRequest with correct parameters', async () => { const mockResponse = { "data": { @@ -53,72 +57,178 @@ describe('Gateway graph method', () => { describe('exec graphql CLI', () => { const exec = require('./utils/exec'); const cliPath = require('./utils/cliPath'); - - const env = Object.assign(process.env, { - CI: true, + const path = require('path'); + const fs = require('fs'); + const os = require('os'); + + // Use spread operator to avoid mutating global process.env + const env = { + ...process.env, + CI: 'true', MPKIT_URL: 'http://example.com', MPKIT_TOKEN: '1234', MPKIT_EMAIL: 'foo@example.com' - }); + }; - test('requires graphql argument', async () => { - const { code, stderr } = await exec(`${cliPath} exec graphql staging`, { env }); + const CLI_TIMEOUT = 10000; - expect(code).toEqual(1); - expect(stderr).toMatch("error: missing required argument 'graphql'"); - }); + describe('argument validation', () => { + test('requires graphql argument', async () => { + const { code, stderr } = await exec(`${cliPath} exec graphql staging`, { env, timeout: CLI_TIMEOUT }); - test('cancels execution on production environment when user says no', async () => { - const { code, stdout, stderr } = await exec(`echo "n" | ${cliPath} exec graphql production "{ records { results { id } } }"`, { env }); + expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'graphql'"); + }); - expect(code).toEqual(0); - expect(stdout).toMatch('Execution cancelled.'); + test('requires environment argument', async () => { + const { code, stderr } = await exec(`${cliPath} exec graphql`, { env, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'environment'"); + }); }); - test('proceeds with execution on production environment when user confirms', async () => { - const { code, stdout, stderr } = await exec(`echo "y" | ${cliPath} exec graphql production "{ records { results { id } } }"`, { env }); + describe('production environment confirmation', () => { + // Detailed production environment detection tests are in productionEnvironment.test.js + // These tests verify the CLI integration with the production environment helper - // This will fail because the mock API isn't set up, but we want to check it doesn't cancel - expect(stdout).not.toMatch('Execution cancelled.'); - expect(stderr).not.toMatch('Execution cancelled.'); - }); + test('prompts for confirmation on production environment and 
cancels when user declines', async () => { + const { code, stdout } = await exec(`echo "n" | ${cliPath} exec graphql production "{ records { results { id } } }"`, { env, timeout: CLI_TIMEOUT }); - test('does not prompt for non-production environments', async () => { - const { code, stdout, stderr } = await exec(`${cliPath} exec graphql staging "{ records { results { id } } }"`, { env }); + expect(code).toBe(0); + expect(stdout).toMatch('Execution cancelled.'); + }); - expect(stdout).not.toMatch('WARNING: You are executing GraphQL on a production environment'); - expect(stdout).not.toMatch('Execution cancelled.'); - }); + test('proceeds with execution on production environment when user confirms', async () => { + const { stdout, stderr } = await exec(`echo "y" | ${cliPath} exec graphql production "{ records { results { id } } }"`, { env, timeout: CLI_TIMEOUT }); - test('accepts --file flag and reads content from file', async () => { - const fixturePath = require('path').resolve(__dirname, 'fixtures/test-graphql.graphql'); - const { code, stdout, stderr } = await exec(`${cliPath} exec graphql staging --file "${fixturePath}"`, { env }); + // This will fail because the mock API isn't set up, but we want to check it doesn't cancel + expect(stdout).not.toMatch('Execution cancelled.'); + expect(stderr).not.toMatch('Execution cancelled.'); + }); + + test('does not prompt for non-production environments', async () => { + const { stdout, stderr } = await exec(`${cliPath} exec graphql staging "{ records { results { id } } }"`, { env, timeout: CLI_TIMEOUT }); - // Command will fail due to mock API but should not complain about missing graphql argument - expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + // Should not show warning or cancellation message + expect(stdout).not.toMatch('Execution cancelled.'); + expect(stderr).not.toMatch('WARNING'); + }); }); - test('accepts -f shorthand flag and reads content from file', async () => { - const fixturePath = require('path').resolve(__dirname, 'fixtures/test-graphql.graphql'); - const { code, stdout, stderr } = await exec(`${cliPath} exec graphql staging -f "${fixturePath}"`, { env }); + describe('file flag handling', () => { + test('accepts --file flag and reads content from file', async () => { + const fixturePath = path.resolve(__dirname, 'fixtures/test-graphql.graphql'); + const { stderr } = await exec(`${cliPath} exec graphql staging --file "${fixturePath}"`, { env, timeout: CLI_TIMEOUT }); + + // Command will fail due to mock API but should not complain about missing graphql argument + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + }); + + test('accepts -f shorthand flag and reads content from file', async () => { + const fixturePath = path.resolve(__dirname, 'fixtures/test-graphql.graphql'); + const { stderr } = await exec(`${cliPath} exec graphql staging -f "${fixturePath}"`, { env, timeout: CLI_TIMEOUT }); + + // Command will fail due to mock API but should not complain about missing graphql argument + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + }); + + test('shows error when file does not exist', async () => { + const { code, stderr } = await exec(`${cliPath} exec graphql staging --file "/nonexistent/path/to/file.graphql"`, { env, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch('File not found'); + expect(stderr).toMatch('/nonexistent/path/to/file.graphql'); + }); + + test('handles empty file as missing query', async () => { + const 
emptyFile = path.join(os.tmpdir(), `empty-graphql-${Date.now()}.graphql`); + fs.writeFileSync(emptyFile, ''); - // Command will fail due to mock API but should not complain about missing graphql argument - expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + try { + const { code, stderr } = await exec(`${cliPath} exec graphql staging -f "${emptyFile}"`, { env, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'graphql'"); + } finally { + fs.unlinkSync(emptyFile); + } + }); + + test('handles file with only whitespace as missing query', async () => { + const whitespaceFile = path.join(os.tmpdir(), `whitespace-graphql-${Date.now()}.graphql`); + fs.writeFileSync(whitespaceFile, ' \n\t\n '); + + try { + const { code, stderr } = await exec(`${cliPath} exec graphql staging -f "${whitespaceFile}"`, { env, timeout: CLI_TIMEOUT }); + + // Whitespace-only content is truthy, so it passes the !query check + // This documents current behavior - whitespace is accepted as valid query + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + } finally { + fs.unlinkSync(whitespaceFile); + } + }); + + test('reads file with GraphQL comments', async () => { + const commentFile = path.join(os.tmpdir(), `comment-graphql-${Date.now()}.graphql`); + fs.writeFileSync(commentFile, '# This is a comment\n{ records { results { id } } }'); + + try { + const { stderr } = await exec(`${cliPath} exec graphql staging -f "${commentFile}"`, { env, timeout: CLI_TIMEOUT }); + + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + } finally { + fs.unlinkSync(commentFile); + } + }); }); - test('shows error when file does not exist', async () => { - const { code, stderr } = await exec(`${cliPath} exec graphql staging --file "/nonexistent/path/to/file.graphql"`, { env }); + describe('error handling', () => { + test('handles connection refused error', async () => { + const badEnv = { + ...process.env, + CI: 'true', + MPKIT_URL: 'http://localhost:1', + MPKIT_TOKEN: 'test-token', + MPKIT_EMAIL: 'test@example.com' + }; - expect(code).toEqual(1); - expect(stderr).toMatch('File not found'); - expect(stderr).toMatch('/nonexistent/path/to/file.graphql'); + const { code, stderr } = await exec(`${cliPath} exec graphql staging "{ records { results { id } } }"`, { env: badEnv, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch(/Failed to execute graphql|ECONNREFUSED|connect ECONNREFUSED/); + }); + + test('handles invalid URL format', async () => { + const badEnv = { + ...process.env, + CI: 'true', + MPKIT_URL: 'not-a-valid-url', + MPKIT_TOKEN: 'test-token', + MPKIT_EMAIL: 'test@example.com' + }; + + const { code, stderr } = await exec(`${cliPath} exec graphql staging "{ test }"`, { env: badEnv, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + }); }); - test('requires either graphql argument or --file option', async () => { - const { code, stderr } = await exec(`${cliPath} exec graphql staging`, { env }); + describe('query edge cases', () => { + test('handles query with unicode characters', async () => { + const { stderr } = await exec(`${cliPath} exec graphql staging "{ records(filter: { name: { value: \\"日本語\\" } }) { results { id } } }"`, { env, timeout: CLI_TIMEOUT }); - expect(code).toEqual(1); - expect(stderr).toMatch("error: missing required argument 'graphql'"); + // Should not fail on argument parsing + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + }); + 
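    // Editorial aside, not part of the patch: the inline queries in these exec() calls go
    // through two layers of escaping. Inside the JS template literal, \\" is a backslash
    // followed by a quote, so the shell command line contains \" sequences; because
    // child_process.exec runs the command through a shell, \" inside the double-quoted
    // argument becomes a literal quote. The argv the CLI ultimately receives is plain
    // GraphQL, e.g. { records(filter: { name: { value: "日本語" } }) { results { id } } }.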
+ test('handles mutation query', async () => { + const { stderr } = await exec(`${cliPath} exec graphql staging "mutation { record_create(record: { table: \\"test\\" }) { id } }"`, { env, timeout: CLI_TIMEOUT }); + + // Should accept mutations as valid GraphQL + expect(stderr).not.toMatch("error: missing required argument 'graphql'"); + }); }); }); diff --git a/test/exec-liquid.test.js b/test/exec-liquid.test.js index c3eb126a..004c8af4 100644 --- a/test/exec-liquid.test.js +++ b/test/exec-liquid.test.js @@ -7,6 +7,10 @@ const Gateway = require('../lib/proxy'); describe('Gateway liquid method', () => { const { apiRequest } = require('../lib/apiRequest'); + beforeEach(() => { + jest.clearAllMocks(); + }); + test('calls apiRequest with correct parameters', async () => { apiRequest.mockResolvedValue({ result: 'HELLO WORLD', error: null }); @@ -36,72 +40,178 @@ describe('Gateway liquid method', () => { describe('exec liquid CLI', () => { const exec = require('./utils/exec'); const cliPath = require('./utils/cliPath'); - - const env = Object.assign(process.env, { - CI: true, + const path = require('path'); + const fs = require('fs'); + const os = require('os'); + + // Use spread operator to avoid mutating global process.env + const env = { + ...process.env, + CI: 'true', MPKIT_URL: 'http://example.com', MPKIT_TOKEN: '1234', MPKIT_EMAIL: 'foo@example.com' - }); + }; - test('requires code argument', async () => { - const { code, stderr } = await exec(`${cliPath} exec liquid staging`, { env }); + const CLI_TIMEOUT = 10000; - expect(code).toEqual(1); - expect(stderr).toMatch("error: missing required argument 'code'"); - }); + describe('argument validation', () => { + test('requires code argument', async () => { + const { code, stderr } = await exec(`${cliPath} exec liquid staging`, { env, timeout: CLI_TIMEOUT }); - test('cancels execution on production environment when user says no', async () => { - const { code, stdout, stderr } = await exec(`echo "n" | ${cliPath} exec liquid production "{{ 'hello' | upcase }}"`, { env }); + expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'code'"); + }); - expect(code).toEqual(0); - expect(stdout).toMatch('Execution cancelled.'); + test('requires environment argument', async () => { + const { code, stderr } = await exec(`${cliPath} exec liquid`, { env, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'environment'"); + }); }); - test('proceeds with execution on production environment when user confirms', async () => { - const { code, stdout, stderr } = await exec(`echo "y" | ${cliPath} exec liquid production "{{ 'hello' | upcase }}"`, { env }); + describe('production environment confirmation', () => { + // Detailed production environment detection tests are in productionEnvironment.test.js + // These tests verify the CLI integration with the production environment helper - // This will fail because the mock API isn't set up, but we want to check it doesn't cancel - expect(stdout).not.toMatch('Execution cancelled.'); - expect(stderr).not.toMatch('Execution cancelled.'); - }); + test('prompts for confirmation on production environment and cancels when user declines', async () => { + const { code, stdout } = await exec(`echo "n" | ${cliPath} exec liquid production "{{ 'hello' | upcase }}"`, { env, timeout: CLI_TIMEOUT }); - test('does not prompt for non-production environments', async () => { - const { code, stdout, stderr } = await exec(`${cliPath} exec liquid staging "{{ 
'hello' | upcase }}"`, { env }); + expect(code).toBe(0); + expect(stdout).toMatch('Execution cancelled.'); + }); - expect(stdout).not.toMatch('WARNING: You are executing liquid code on a production environment'); - expect(stdout).not.toMatch('Execution cancelled.'); - }); + test('proceeds with execution on production environment when user confirms', async () => { + const { stdout, stderr } = await exec(`echo "y" | ${cliPath} exec liquid production "{{ 'hello' | upcase }}"`, { env, timeout: CLI_TIMEOUT }); - test('accepts --file flag and reads content from file', async () => { - const fixturePath = require('path').resolve(__dirname, 'fixtures/test-liquid.liquid'); - const { code, stdout, stderr } = await exec(`${cliPath} exec liquid staging --file "${fixturePath}"`, { env }); + // This will fail because the mock API isn't set up, but we want to check it doesn't cancel + expect(stdout).not.toMatch('Execution cancelled.'); + expect(stderr).not.toMatch('Execution cancelled.'); + }); + + test('does not prompt for non-production environments', async () => { + const { stdout, stderr } = await exec(`${cliPath} exec liquid staging "{{ 'hello' | upcase }}"`, { env, timeout: CLI_TIMEOUT }); - // Command will fail due to mock API but should not complain about missing code argument - expect(stderr).not.toMatch("error: missing required argument 'code'"); + // Should not show warning or cancellation message + expect(stdout).not.toMatch('Execution cancelled.'); + expect(stderr).not.toMatch('WARNING'); + }); }); - test('accepts -f shorthand flag and reads content from file', async () => { - const fixturePath = require('path').resolve(__dirname, 'fixtures/test-liquid.liquid'); - const { code, stdout, stderr } = await exec(`${cliPath} exec liquid staging -f "${fixturePath}"`, { env }); + describe('file flag handling', () => { + test('accepts --file flag and reads content from file', async () => { + const fixturePath = path.resolve(__dirname, 'fixtures/test-liquid.liquid'); + const { stderr } = await exec(`${cliPath} exec liquid staging --file "${fixturePath}"`, { env, timeout: CLI_TIMEOUT }); + + // Command will fail due to mock API but should not complain about missing code argument + expect(stderr).not.toMatch("error: missing required argument 'code'"); + }); + + test('accepts -f shorthand flag and reads content from file', async () => { + const fixturePath = path.resolve(__dirname, 'fixtures/test-liquid.liquid'); + const { stderr } = await exec(`${cliPath} exec liquid staging -f "${fixturePath}"`, { env, timeout: CLI_TIMEOUT }); + + // Command will fail due to mock API but should not complain about missing code argument + expect(stderr).not.toMatch("error: missing required argument 'code'"); + }); + + test('shows error when file does not exist', async () => { + const { code, stderr } = await exec(`${cliPath} exec liquid staging --file "/nonexistent/path/to/file.liquid"`, { env, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch('File not found'); + expect(stderr).toMatch('/nonexistent/path/to/file.liquid'); + }); - // Command will fail due to mock API but should not complain about missing code argument - expect(stderr).not.toMatch("error: missing required argument 'code'"); + test('handles empty file as missing code', async () => { + const emptyFile = path.join(os.tmpdir(), `empty-liquid-${Date.now()}.liquid`); + fs.writeFileSync(emptyFile, ''); + + try { + const { code, stderr } = await exec(`${cliPath} exec liquid staging -f "${emptyFile}"`, { env, timeout: CLI_TIMEOUT }); + + 
expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'code'"); + } finally { + fs.unlinkSync(emptyFile); + } + }); + + test('handles file with only whitespace as valid code', async () => { + const whitespaceFile = path.join(os.tmpdir(), `whitespace-liquid-${Date.now()}.liquid`); + fs.writeFileSync(whitespaceFile, ' \n\t\n '); + + try { + const { stderr } = await exec(`${cliPath} exec liquid staging -f "${whitespaceFile}"`, { env, timeout: CLI_TIMEOUT }); + + // Whitespace-only content is truthy, so it passes the !code check + // This documents current behavior - whitespace is accepted as valid code + expect(stderr).not.toMatch("error: missing required argument 'code'"); + } finally { + fs.unlinkSync(whitespaceFile); + } + }); + + test('reads file with liquid comments', async () => { + const commentFile = path.join(os.tmpdir(), `comment-liquid-${Date.now()}.liquid`); + fs.writeFileSync(commentFile, "{% comment %}This is a comment{% endcomment %}{{ 'hello' | upcase }}"); + + try { + const { stderr } = await exec(`${cliPath} exec liquid staging -f "${commentFile}"`, { env, timeout: CLI_TIMEOUT }); + + expect(stderr).not.toMatch("error: missing required argument 'code'"); + } finally { + fs.unlinkSync(commentFile); + } + }); }); - test('shows error when file does not exist', async () => { - const { code, stderr } = await exec(`${cliPath} exec liquid staging --file "/nonexistent/path/to/file.liquid"`, { env }); + describe('error handling', () => { + test('handles connection refused error', async () => { + const badEnv = { + ...process.env, + CI: 'true', + MPKIT_URL: 'http://localhost:1', + MPKIT_TOKEN: 'test-token', + MPKIT_EMAIL: 'test@example.com' + }; - expect(code).toEqual(1); - expect(stderr).toMatch('File not found'); - expect(stderr).toMatch('/nonexistent/path/to/file.liquid'); + const { code, stderr } = await exec(`${cliPath} exec liquid staging "{{ 'hello' | upcase }}"`, { env: badEnv, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + expect(stderr).toMatch(/Failed to execute liquid|ECONNREFUSED|connect ECONNREFUSED/); + }); + + test('handles invalid URL format', async () => { + const badEnv = { + ...process.env, + CI: 'true', + MPKIT_URL: 'not-a-valid-url', + MPKIT_TOKEN: 'test-token', + MPKIT_EMAIL: 'test@example.com' + }; + + const { code } = await exec(`${cliPath} exec liquid staging "{{ 'test' }}"`, { env: badEnv, timeout: CLI_TIMEOUT }); + + expect(code).toBe(1); + }); }); - test('requires either code argument or --file option', async () => { - const { code, stderr } = await exec(`${cliPath} exec liquid staging`, { env }); + describe('code edge cases', () => { + test('handles code with unicode characters', async () => { + const { stderr } = await exec(`${cliPath} exec liquid staging "{{ '日本語' | upcase }}"`, { env, timeout: CLI_TIMEOUT }); + + // Should not fail on argument parsing + expect(stderr).not.toMatch("error: missing required argument 'code'"); + }); - expect(code).toEqual(1); - expect(stderr).toMatch("error: missing required argument 'code'"); + test('handles code with special liquid tags', async () => { + const { stderr } = await exec(`${cliPath} exec liquid staging "{% assign x = 'hello' %}{{ x }}"`, { env, timeout: CLI_TIMEOUT }); + + // Should accept liquid tags as valid code + expect(stderr).not.toMatch("error: missing required argument 'code'"); + }); }); }); @@ -121,18 +231,18 @@ describe('exec liquid integration', () => { timeout: 30000 }); - expect(code).toEqual(0); + expect(code).toBe(0); expect(stdout).toMatch('HELLO'); 
expect(stderr).toBe(''); }, 30000); (hasRealCredentials ? test : test.skip)('handles liquid syntax error on real instance', async () => { - const { stdout, stderr, code } = await exec(`${cliPath} exec liquid dev "{{ 'hello' | invalid_filter }}"`, { + const { stderr, code } = await exec(`${cliPath} exec liquid dev "{{ 'hello' | invalid_filter }}"`, { env: process.env, timeout: 30000 }); - expect(code).toEqual(1); + expect(code).toBe(1); expect(stderr).toMatch('Liquid execution error'); }, 30000); @@ -144,7 +254,7 @@ describe('exec liquid integration', () => { }); const afterTime = new Date(); - expect(code).toEqual(0); + expect(code).toBe(0); expect(stderr).toBe(''); // Parse the returned time - liquid to_time returns ISO format like "2023-01-01 12:00:00 +0000" @@ -162,12 +272,12 @@ describe('exec liquid integration', () => { }, 30000); (hasRealCredentials ? test : test.skip)('handles unknown tag error', async () => { - const { stdout, stderr, code } = await exec(`${cliPath} exec liquid dev "{% hello %}"`, { + const { stderr, code } = await exec(`${cliPath} exec liquid dev "{% hello %}"`, { env: process.env, timeout: 30000 }); - expect(code).toEqual(1); + expect(code).toBe(1); expect(stderr).toMatch('Liquid execution error: Liquid syntax error: Unknown tag \'hello\''); }, 30000); }); diff --git a/test/productionEnvironment.test.js b/test/productionEnvironment.test.js new file mode 100644 index 00000000..0bbbd68c --- /dev/null +++ b/test/productionEnvironment.test.js @@ -0,0 +1,157 @@ +// Mock prompts module before requiring the module under test +jest.mock('prompts'); +jest.mock('../lib/logger', () => ({ + Warn: jest.fn(), + Info: jest.fn(), + Error: jest.fn() +})); + +const { isProductionEnvironment, confirmProductionExecution } = require('../lib/productionEnvironment'); +const prompts = require('prompts'); + +describe('isProductionEnvironment', () => { + describe('returns true for production environments', () => { + test('detects "production"', () => { + expect(isProductionEnvironment('production')).toBe(true); + }); + + test('detects "prod"', () => { + expect(isProductionEnvironment('prod')).toBe(true); + }); + + test('detects "PRODUCTION" (uppercase)', () => { + expect(isProductionEnvironment('PRODUCTION')).toBe(true); + }); + + test('detects "PROD" (uppercase)', () => { + expect(isProductionEnvironment('PROD')).toBe(true); + }); + + test('detects "Production" (mixed case)', () => { + expect(isProductionEnvironment('Production')).toBe(true); + }); + + test('detects environment containing "prod" (e.g., "my-prod-server")', () => { + expect(isProductionEnvironment('my-prod-server')).toBe(true); + }); + + test('detects environment containing "production" (e.g., "production-us-east")', () => { + expect(isProductionEnvironment('production-us-east')).toBe(true); + }); + + test('detects "preprod" as production (contains "prod")', () => { + // Note: This is current behavior - "preprod" contains "prod" so it triggers + // If this is undesirable, the regex should be updated to use word boundaries + expect(isProductionEnvironment('preprod')).toBe(true); + }); + + test('detects "prod-replica" as production', () => { + expect(isProductionEnvironment('prod-replica')).toBe(true); + }); + }); + + describe('returns false for non-production environments', () => { + test('returns false for "staging"', () => { + expect(isProductionEnvironment('staging')).toBe(false); + }); + + test('returns false for "development"', () => { + expect(isProductionEnvironment('development')).toBe(false); + }); + + 
test('returns false for "dev"', () => { + expect(isProductionEnvironment('dev')).toBe(false); + }); + + test('returns false for "test"', () => { + expect(isProductionEnvironment('test')).toBe(false); + }); + + test('returns false for "qa"', () => { + expect(isProductionEnvironment('qa')).toBe(false); + }); + + test('returns false for "local"', () => { + expect(isProductionEnvironment('local')).toBe(false); + }); + + test('returns false for "sandbox"', () => { + expect(isProductionEnvironment('sandbox')).toBe(false); + }); + + test('returns false for "demo"', () => { + expect(isProductionEnvironment('demo')).toBe(false); + }); + }); + + describe('handles edge cases', () => { + test('returns false for null', () => { + expect(isProductionEnvironment(null)).toBe(false); + }); + + test('returns false for undefined', () => { + expect(isProductionEnvironment(undefined)).toBe(false); + }); + + test('returns false for empty string', () => { + expect(isProductionEnvironment('')).toBe(false); + }); + + test('returns false for whitespace-only string', () => { + expect(isProductionEnvironment(' ')).toBe(false); + }); + + test('returns false for number', () => { + expect(isProductionEnvironment(123)).toBe(false); + }); + + test('returns false for object', () => { + expect(isProductionEnvironment({})).toBe(false); + }); + }); +}); + +describe('confirmProductionExecution', () => { + beforeEach(() => { + jest.clearAllMocks(); + }); + + test('returns true when user confirms', async () => { + prompts.mockResolvedValue({ confirmed: true }); + + const result = await confirmProductionExecution('production'); + + expect(result).toBe(true); + expect(prompts).toHaveBeenCalledWith(expect.objectContaining({ + type: 'confirm', + name: 'confirmed', + initial: false + })); + }); + + test('returns false when user declines', async () => { + prompts.mockResolvedValue({ confirmed: false }); + + const result = await confirmProductionExecution('production'); + + expect(result).toBe(false); + }); + + test('returns undefined when user cancels (Ctrl+C)', async () => { + prompts.mockResolvedValue({}); + + const result = await confirmProductionExecution('production'); + + expect(result).toBeUndefined(); + }); + + test('includes environment name in prompt message', async () => { + prompts.mockResolvedValue({ confirmed: true }); + + await confirmProductionExecution('my-prod-server'); + + expect(prompts).toHaveBeenCalledWith(expect.objectContaining({ + message: expect.stringContaining('my-prod-server') + })); + }); +}); diff --git a/test/test-run.test.js b/test/test-run.test.js index 93a9a3ff..682b7fd8 100644 --- a/test/test-run.test.js +++ b/test/test-run.test.js @@ -1,129 +1,886 @@ /* global jest */ -const exec = require('./utils/exec'); -const cliPath = require('./utils/cliPath'); +jest.mock('../lib/apiRequest', () => ({ + apiRequest: jest.fn() +})); require('dotenv').config(); -const cwd = name => `${process.cwd()}/test/fixtures/test/${name}`; +const exec = require('./utils/exec'); +const cliPath = require('./utils/cliPath'); +const Gateway = require('../lib/proxy'); +const cwd = name => `${process.cwd()}/test/fixtures/test/${name}`; const run = (fixtureName, options) => exec(`${cliPath} test run ${options || ''}`, { cwd: cwd(fixtureName), env: process.env }); const deploy = (fixtureName) => exec(`${cliPath} deploy staging`, { cwd: cwd(fixtureName), env: process.env }); -jest.setTimeout(200000); // Test run can take a while due to log polling - -describe('Test run command', () => { - describe('with mixed tests (passing and 
failing)', () => { - // Deploy the test fixtures before running any tests - beforeAll(async () => { - const { stdout, stderr } = await deploy('with-tests-module'); - if (!stdout.includes('Deploy succeeded')) { - console.error('Deploy failed:', stderr); - throw new Error('Failed to deploy test fixtures'); - } - }); +jest.setTimeout(200000); - test('displays instance URL when running tests', async () => { - const { stdout, stderr } = await run('with-tests-module', 'staging'); +describe('pos-cli test run', () => { + describe('Unit tests', () => { + const { apiRequest } = require('../lib/apiRequest'); - expect(stdout).toMatch(`Running tests on: ${process.env.MPKIT_URL}`); + beforeEach(() => { + apiRequest.mockReset(); }); - // Note: This test requires a staging instance WITHOUT the tests module installed. - // Since integration tests require the tests module to be deployed, this test - // is skipped when run against the same instance. - test.skip('shows error when tests module is not installed', async () => { - const { stderr } = await run('without-tests-module', 'staging'); + describe('Gateway.test(name)', () => { + test('calls apiRequest with correct URL for single test (JS format)', async () => { + apiRequest.mockResolvedValue({ passed: 1, failed: 0, total: 1, tests: [] }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.test('example_test'); + + expect(apiRequest).toHaveBeenCalledWith({ + method: 'GET', + uri: 'http://example.com/_tests/run.js?name=example_test', + formData: undefined, + json: true, + forever: undefined, + request: expect.any(Function) + }); + expect(result).toEqual({ passed: 1, failed: 0, total: 1, tests: [] }); + }); + + test('handles test with path in name', async () => { + apiRequest.mockResolvedValue({ passed: 1, failed: 0, total: 1 }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + await gateway.test('test/examples/assertions_test'); + + expect(apiRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: 'http://example.com/_tests/run.js?name=test/examples/assertions_test' + })); + }); + }); + + describe('Gateway.testRunAsync()', () => { + test('calls apiRequest with run_async endpoint (no .js extension for v1.1.0+)', async () => { + apiRequest.mockResolvedValue({ test_run_id: 'test-run-123' }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.testRunAsync(); + + expect(apiRequest).toHaveBeenCalledWith({ + method: 'GET', + uri: 'http://example.com/_tests/run_async', + formData: undefined, + json: true, + forever: undefined, + request: expect.any(Function) + }); + expect(result).toEqual({ test_run_id: 'test-run-123' }); + }); - expect(stderr).toMatch('Tests module not found'); + test('handles error response', async () => { + apiRequest.mockResolvedValue({ error: 'Tests module not found' }); + + const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); + const result = await gateway.testRunAsync(); + + expect(result).toEqual({ error: 'Tests module not found' }); + }); }); - test('runs all tests and shows results when no test name provided', async () => { - const { stdout, stderr, code } = await run('with-tests-module', 'staging'); + describe('formatTestLog', () => { + let formatTestLog; + + beforeEach(() => { + const testRunModule = require('../bin/pos-cli-test-run'); + formatTestLog = 
testRunModule.formatTestLog; + }); + + test('highlights test log with test path (new test indicator)', () => { + const logRow = { + message: '{"path": "app/lib/test/example_test.liquid"}', + error_type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + expect(result).toContain('▶'); + expect(result).toContain('app/lib/test/example_test.liquid'); + }); + + test('displays test log without path normally', () => { + const logRow = { + message: 'Test assertion passed', + error_type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + expect(result).not.toContain('▶'); + expect(result).toContain('Test assertion passed'); + }); + + test('dims debug logs (type != test_name)', () => { + const logRow = { + message: 'Debug: checking variable value', + error_type: 'debug' + }; + + const result = formatTestLog(logRow, false); + + expect(result).toContain('[debug:debug]'); + expect(result).toContain('Debug: checking variable value'); + }); + + test('shows debug type in dimmed log output', () => { + const logRow = { + message: 'Custom debug message from test', + error_type: 'custom_debug' + }; + + const result = formatTestLog(logRow, false); + + expect(result).toContain('[debug:custom_debug]'); + expect(result).toContain('Custom debug message from test'); + }); + + test('highlights test path in modules directory', () => { + const logRow = { + message: '{"path": "modules/my_module/test/unit_test.liquid"}', + error_type: 'liquid_test_xyz789' + }; - // Verify test execution started - expect(stdout).toMatch('Starting test run...'); + const result = formatTestLog(logRow, true); - // Verify test results are actually displayed (not just hanging) - // The output should include either test results summary or individual test status - const hasTestResults = stdout.includes('passed') || - stdout.includes('failed') || - stdout.includes('Test Results:') || - stdout.includes('total)'); + expect(result).toContain('▶'); + expect(result).toContain('modules/my_module/test/unit_test.liquid'); + }); - expect(hasTestResults).toBe(true); + test('handles message with trailing newline', () => { + const logRow = { + message: 'Test message with newline\n', + error_type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + expect(result).not.toMatch(/\n$/); + expect(result).toContain('Test message with newline'); + }); + + test('handles non-string message by converting to JSON', () => { + const logRow = { + message: { key: 'value', nested: { data: true } }, + error_type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + expect(result).toContain('key'); + expect(result).toContain('value'); + }); + }); + + describe('formatDuration', () => { + let formatDuration; + + beforeEach(() => { + const testRunModule = require('../bin/pos-cli-test-run'); + formatDuration = testRunModule.formatDuration; + }); + + test('formats milliseconds under 1 second', () => { + expect(formatDuration(0)).toBe('0ms'); + expect(formatDuration(1)).toBe('1ms'); + expect(formatDuration(500)).toBe('500ms'); + expect(formatDuration(999)).toBe('999ms'); + }); + + test('formats seconds under 1 minute', () => { + expect(formatDuration(1000)).toBe('1.00s'); + expect(formatDuration(1500)).toBe('1.50s'); + expect(formatDuration(2345)).toBe('2.35s'); + expect(formatDuration(59999)).toBe('60.00s'); + }); + + test('formats minutes and seconds', () => { + expect(formatDuration(60000)).toBe('1m 0.00s'); + expect(formatDuration(65000)).toBe('1m 5.00s'); + 
expect(formatDuration(90000)).toBe('1m 30.00s'); + expect(formatDuration(125500)).toBe('2m 5.50s'); + }); + + test('formats large durations', () => { + expect(formatDuration(300000)).toBe('5m 0.00s'); + expect(formatDuration(3600000)).toBe('60m 0.00s'); + }); }); - test('runs a single passing test by name and shows success', async () => { - const { stdout, stderr, code } = await run('with-tests-module', 'staging example_test'); + describe('TestLogStream', () => { + let TestLogStream; + + beforeEach(() => { + const testRunModule = require('../bin/pos-cli-test-run'); + TestLogStream = testRunModule.TestLogStream; + }); + + describe('parseJsonSummary', () => { + test('parses successful test completion JSON from tests module 1.1.1+', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total_tests: 5, + total_assertions: 16, + total_errors: 0, + duration_ms: 26, + test_run_id: 'test-run-123', + tests: [ + { name: "test/array_test", success: true, assertions: 2, errors: {} }, + { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, + { name: "test/example_test", success: true, assertions: 5, errors: {} }, + { name: "test/object_test", success: true, assertions: 3, errors: {} }, + { name: "test/string_test", success: true, assertions: 2, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 5, + passed: 5, + failed: 0, + assertions: 16, + tests: [ + { name: "test/array_test", status: "passed", passed: true, assertions: 2 }, + { name: "test/examples/assertions_test", status: "passed", passed: true, assertions: 4 }, + { name: "test/example_test", status: "passed", passed: true, assertions: 5 }, + { name: "test/object_test", status: "passed", passed: true, assertions: 3 }, + { name: "test/string_test", status: "passed", passed: true, assertions: 2 } + ], + duration: 26 + }); + }); + + test('parses failed test completion JSON with error details', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: false, + total_tests: 3, + total_assertions: 10, + total_errors: 1, + duration_ms: 45, + test_run_id: 'test-run-123', + tests: [ + { name: "test/passing_test", success: true, assertions: 3, errors: {} }, + { name: "test/failing_test", success: false, assertions: 2, errors: { expected: "field to be 2", actual: "field is 1" } }, + { name: "test/another_passing_test", success: true, assertions: 5, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 3, + passed: 2, + failed: 1, + assertions: 10, + tests: [ + { name: "test/passing_test", status: "passed", passed: true, assertions: 3 }, + { name: "test/failing_test", status: "failed", passed: false, assertions: 2, error: "{\"expected\":\"field to be 2\",\"actual\":\"field is 1\"}" }, + { name: "test/another_passing_test", status: "passed", passed: true, assertions: 5 } + ], + duration: 45 + }); + }); + + test('handles alternative field names (total instead of total_tests)', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total: 4, + assertions: 8, + duration: 30, + test_run_id: 'test-run-123', + tests: [ + { name: "test1", success: true, assertions: 2, errors: {} }, + { name: "test2", success: true, assertions: 2, errors: {} }, + { name: "test3", success: true, assertions: 2, errors: {} }, + { name: "test4", success: true, assertions: 2, errors: {} } + ] + }); + + 
const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 4, + passed: 4, + failed: 0, + assertions: 8, + tests: [ + { name: "test1", status: "passed", passed: true, assertions: 2 }, + { name: "test2", status: "passed", passed: true, assertions: 2 }, + { name: "test3", status: "passed", passed: true, assertions: 2 }, + { name: "test4", status: "passed", passed: true, assertions: 2 } + ], + duration: 30 + }); + }); + + test('returns null for invalid JSON', () => { + const stream = new TestLogStream({}); + const invalidJson = '{ "invalid": json }'; + const result = stream.parseJsonSummary(invalidJson); + expect(result).toBeNull(); + }); + + test('handles empty tests array in summary', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total_tests: 0, + total_assertions: 0, + duration_ms: 5, + tests: [] + }); + + const result = stream.parseJsonSummary(message); + + expect(result).toEqual({ + total: 0, + passed: 0, + failed: 0, + assertions: 0, + tests: [], + duration: 5 + }); + }); + + test('handles test with no assertions', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total_tests: 1, + total_assertions: 0, + duration_ms: 10, + tests: [ + { name: "test/empty_test", success: true, assertions: 0, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result.tests[0].assertions).toBe(0); + expect(result.assertions).toBe(0); + }); + + test('handles test with array of errors', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: false, + total_tests: 1, + total_errors: 1, + duration_ms: 15, + tests: [ + { + name: "test/multi_error_test", + success: false, + assertions: 3, + errors: ["Error 1", "Error 2", "Error 3"] + } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result.tests[0].errors).toEqual(["Error 1", "Error 2", "Error 3"]); + }); + + test('handles missing test name gracefully', () => { + const stream = new TestLogStream({}); + + const message = JSON.stringify({ + success: true, + total_tests: 1, + duration_ms: 10, + tests: [ + { success: true, assertions: 1, errors: {} } + ] + }); + + const result = stream.parseJsonSummary(message); + + expect(result.tests[0].name).toBe('Unknown test'); + }); + }); + + describe('isValidTestSummaryJson', () => { + test('identifies valid test summary JSON', () => { + const stream = new TestLogStream({}); + + const validMessage = JSON.stringify({ + success: true, + total_tests: 5, + total_assertions: 16, + duration_ms: 26, + test_run_id: 'test-run-123', + tests: [] + }); + + expect(stream.isValidTestSummaryJson(validMessage)).toBe(true); + }); + + test('rejects JSON without tests array', () => { + const stream = new TestLogStream({}); + + const invalidMessage = JSON.stringify({ + success: true, + total_tests: 5, + duration_ms: 26 + }); + + expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); + }); + + test('rejects JSON without success field', () => { + const stream = new TestLogStream({}); + + const invalidMessage = JSON.stringify({ + total_tests: 5, + duration_ms: 26, + tests: [] + }); + + expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); + }); + + test('rejects non-test JSON', () => { + const stream = new TestLogStream({}); + + const invalidMessage = JSON.stringify({ + path: "test/array_test" + }); + + expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); + }); + }); + + 
describe('testCompleted event emission', () => { + test('emits testCompleted only once even when duplicate JSON summaries are received', () => { + const stream = new TestLogStream({}); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const testSummaryJson = JSON.stringify({ + success: true, + total_tests: 5, + total_assertions: 16, + duration_ms: 26, + test_run_id: 'test-run-123', + tests: [ + { name: "test/array_test", success: true, assertions: 2, errors: {} }, + { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, + { name: "test/example_test", success: true, assertions: 5, errors: {} }, + { name: "test/object_test", success: true, assertions: 3, errors: {} }, + { name: "test/string_test", success: true, assertions: 2, errors: {} } + ] + }); + + const logRow1 = { id: 1, message: testSummaryJson, error_type: '' }; + const logRow2 = { id: 2, message: testSummaryJson, error_type: '' }; + const logRow3 = { id: 3, message: testSummaryJson, error_type: '' }; + + stream.processLogMessage(logRow1); + stream.processLogMessage(logRow2); + stream.processLogMessage(logRow3); + + expect(mockEmit).toHaveBeenCalledTimes(1); + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + + const emittedResults = mockEmit.mock.calls[0][1]; + expect(emittedResults.total).toBe(5); + expect(emittedResults.passed).toBe(5); + expect(emittedResults.failed).toBe(0); + }); + + test('only processes JSON summaries that match the testRunId', () => { + const stream = new TestLogStream({}, 30000, 'test-run-123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const matchingSummaryJson = JSON.stringify({ + success: false, + total_tests: 2, + total_errors: 2, + test_run_id: 'test-run-123', + tests: [ + { name: "test1", success: false, assertions: 1, errors: { message: "failed" } }, + { name: "test2", success: false, assertions: 1, errors: { message: "failed" } } + ] + }); + + const nonMatchingSummaryJson = JSON.stringify({ + success: true, + total_tests: 2, + test_run_id: 'test-run-456', + tests: [ + { name: "test1", success: true, assertions: 1, errors: {} }, + { name: "test2", success: true, assertions: 1, errors: {} } + ] + }); + + stream.processLogMessage({ id: 1, message: nonMatchingSummaryJson, error_type: '' }); + stream.processLogMessage({ id: 2, message: matchingSummaryJson, error_type: '' }); + + expect(mockEmit).toHaveBeenCalledTimes(1); + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + + const emittedResults = mockEmit.mock.calls[0][1]; + expect(emittedResults.total).toBe(2); + expect(emittedResults.passed).toBe(0); + expect(emittedResults.failed).toBe(2); + }); + + test('ignores JSON summaries when no testRunId is set (backward compatibility)', () => { + const stream = new TestLogStream({}); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const summaryWithIdJson = JSON.stringify({ + success: true, + total_tests: 1, + test_run_id: 'test-run-123', + tests: [{ name: "test1", success: true, assertions: 1, errors: {} }] + }); + + stream.processLogMessage({ id: 1, message: summaryWithIdJson, error_type: '' }); + + expect(mockEmit).toHaveBeenCalledTimes(1); + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + }); + }); + + describe('testName filtering', () => { + test('detects test start with matching testName type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const startLog = 
{ + id: 1, + message: 'Starting unit tests', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(startLog); + + expect(mockEmit).toHaveBeenCalledWith('testStarted'); + expect(stream.testStarted).toBe(true); + }); + + test('ignores test start with non-matching testName type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const startLog = { + id: 1, + message: 'Starting unit tests', + error_type: 'liquid_test_different' + }; + + stream.processLogMessage(startLog); + + expect(mockEmit).not.toHaveBeenCalled(); + expect(stream.testStarted).toBe(false); + }); + + test('detects test completion with testName SUMMARY type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const summaryJson = JSON.stringify({ + success: true, + total_tests: 2, + tests: [ + { name: "test1", success: true, assertions: 1, errors: {} }, + { name: "test2", success: true, assertions: 1, errors: {} } + ] + }); + + const summaryLog = { + id: 2, + message: summaryJson, + error_type: 'liquid_test_abc123 SUMMARY' + }; + + stream.processLogMessage(summaryLog); + + expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); + expect(stream.completed).toBe(true); + }); + + test('ignores summary with non-matching testName SUMMARY type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const summaryJson = JSON.stringify({ + success: true, + total_tests: 2, + tests: [ + { name: "test1", success: true, assertions: 1, errors: {} }, + { name: "test2", success: true, assertions: 1, errors: {} } + ] + }); + + const summaryLog = { + id: 2, + message: summaryJson, + error_type: 'liquid_test_different SUMMARY' + }; + + stream.processLogMessage(summaryLog); + + expect(mockEmit).toHaveBeenCalledWith('testLog', expect.any(Object), false); + expect(stream.completed).toBe(false); + }); + + test('emits testLog with isTestLog=true for logs with matching testName type', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const testLog = { + id: 2, + message: '{"path": "app/lib/test/example_test.liquid"}', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(testLog); + + expect(mockEmit).toHaveBeenCalledWith('testLog', testLog, true); + }); + + test('emits testLog with isTestLog=false for logs with different type (debug logs)', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const debugLog = { + id: 2, + message: 'Debug: checking variable value', + error_type: 'debug' + }; + + stream.processLogMessage(debugLog); + + expect(mockEmit).toHaveBeenCalledWith('testLog', debugLog, false); + }); + + test('does not emit logs before test started', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const earlyLog = { + id: 1, + message: 'Some early log from previous test run', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(earlyLog); + + expect(mockEmit).not.toHaveBeenCalled(); + }); + + test('does not emit logs after test 
completed', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + stream.testStarted = true; + stream.completed = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const lateLog = { + id: 3, + message: 'Some late log', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(lateLog); + + expect(mockEmit).not.toHaveBeenCalled(); + }); + + test('filters noise from past test runs by only processing logs with matching testName', () => { + const stream = new TestLogStream({}, 30000, null, 'liquid_test_current'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const pastLog = { + id: 1, + message: 'Some past test log', + error_type: 'liquid_test_past' + }; + + const currentLog = { + id: 2, + message: 'Current test log', + error_type: 'liquid_test_current' + }; + + stream.processLogMessage(pastLog); + stream.processLogMessage(currentLog); + + expect(mockEmit).toHaveBeenCalledTimes(2); + expect(mockEmit).toHaveBeenNthCalledWith(1, 'testLog', pastLog, false); + expect(mockEmit).toHaveBeenNthCalledWith(2, 'testLog', currentLog, true); + }); + }); + }); + }); - // Verify test results are displayed - expect(stdout).toMatch('Test Results:'); + describe('CLI argument validation', () => { + const env = { + ...process.env, + CI: 'true', + MPKIT_URL: 'http://example.com', + MPKIT_TOKEN: '1234', + MPKIT_EMAIL: 'foo@example.com' + }; - // Verify the passing test is shown as passed (with checkmark) - expect(stdout).toMatch(/✓.*example_test/); + const CLI_TIMEOUT = 10000; - // Verify summary shows 1 passed - expect(stdout).toMatch('1 passed'); + test('requires environment argument', async () => { + const { code, stderr } = await exec(`${cliPath} test run`, { env, timeout: CLI_TIMEOUT }); - // Exit code should be 0 for passing test - expect(code).toBe(0); + expect(code).toBe(1); + expect(stderr).toMatch("error: missing required argument 'environment'"); }); - test('runs a single failing test by name and shows failure', async () => { - const { stdout, stderr, code } = await run('with-tests-module', 'staging failing_test'); + test('accepts test name argument', async () => { + const { stderr } = await exec(`${cliPath} test run staging my_test_name`, { env, timeout: CLI_TIMEOUT }); - // Verify test results are displayed - expect(stdout + stderr).toMatch('Test Results:'); + expect(stderr).not.toMatch("error: missing required argument"); + }); + + test('accepts test name with path', async () => { + const { stderr } = await exec(`${cliPath} test run staging test/examples/assertions_test`, { env, timeout: CLI_TIMEOUT }); + + expect(stderr).not.toMatch("error: missing required argument"); + }); - // Verify the failing test is shown as failed (with X mark) - expect(stdout + stderr).toMatch(/✗.*failing_test/); + test('handles connection refused error', async () => { + const badEnv = { + ...process.env, + CI: 'true', + MPKIT_URL: 'http://localhost:1', + MPKIT_TOKEN: 'test-token', + MPKIT_EMAIL: 'test@example.com' + }; - // Verify summary shows 1 failed - expect(stdout + stderr).toMatch('1 failed'); + const { code, stderr } = await exec(`${cliPath} test run staging`, { env: badEnv, timeout: CLI_TIMEOUT }); - // Exit code should be 1 for failing test expect(code).toBe(1); + expect(stderr).toMatch(/ECONNREFUSED|Failed to execute test|connect ECONNREFUSED/); }); - test('exits with code 1 when running all tests and at least one fails', async () => { - const { stdout, stderr, code } = await run('with-tests-module', 'staging'); + 
test('handles invalid URL format', async () => { + const badEnv = { + ...process.env, + CI: 'true', + MPKIT_URL: 'not-a-valid-url', + MPKIT_TOKEN: 'test-token', + MPKIT_EMAIL: 'test@example.com' + }; - // Verify we ran multiple tests (the fixture has passing and failing tests) - const output = stdout + stderr; - expect(output).toMatch(/\d+ passed/); - expect(output).toMatch(/\d+ failed/); + const { code } = await exec(`${cliPath} test run staging`, { env: badEnv, timeout: CLI_TIMEOUT }); - // Exit code should be 1 when at least one test fails expect(code).toBe(1); }); }); - describe('with only passing tests', () => { - // Deploy the test fixtures with only passing tests - beforeAll(async () => { - const { stdout, stderr } = await deploy('with-passing-tests'); - if (!stdout.includes('Deploy succeeded')) { - console.error('Deploy failed:', stderr); - throw new Error('Failed to deploy test fixtures'); - } + describe('Integration tests', () => { + describe('with mixed tests (passing and failing)', () => { + beforeAll(async () => { + const { stdout, stderr } = await deploy('with-tests-module'); + if (!stdout.includes('Deploy succeeded')) { + console.error('Deploy failed:', stderr); + throw new Error('Failed to deploy test fixtures'); + } + }); + + test.skip('shows error when tests module is not installed', async () => { + const { stderr } = await run('without-tests-module', 'staging'); + + expect(stderr).toMatch('Tests module not found'); + }); + + test('runs all tests, displays URL and results, exits with code 1 when at least one fails', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging'); + const output = stdout + stderr; + + expect(stdout).toMatch(`Running tests on: ${process.env.MPKIT_URL}`); + expect(stdout).toMatch('Starting test run...'); + + const hasTestResults = output.includes('passed') || + output.includes('failed') || + output.includes('Test Results:') || + output.includes('total)'); + expect(hasTestResults).toBe(true); + + expect(output).toMatch(/\d+ passed/); + expect(output).toMatch(/\d+ failed/); + + expect(code).toBe(1); + }); + + test('runs a single passing test by name and shows success', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging example_test'); + + expect(stdout).toMatch('Test Results:'); + expect(stdout).toMatch(/✓.*example_test/); + expect(stdout).toMatch('1 passed'); + + expect(code).toBe(0); + }); + + test('runs a single failing test by name and shows failure', async () => { + const { stdout, stderr, code } = await run('with-tests-module', 'staging failing_test'); + + expect(stdout + stderr).toMatch('Test Results:'); + expect(stdout + stderr).toMatch(/✗.*failing_test/); + expect(stdout + stderr).toMatch('1 failed'); + + expect(code).toBe(1); + }); }); - test('exits with code 0 when running all tests and all pass', async () => { - const { stdout, stderr, code } = await run('with-passing-tests', 'staging'); + describe('with only passing tests', () => { + beforeAll(async () => { + const { stdout, stderr } = await deploy('with-passing-tests'); + if (!stdout.includes('Deploy succeeded')) { + console.error('Deploy failed:', stderr); + throw new Error('Failed to deploy test fixtures'); + } + }); - // Verify test execution started - expect(stdout).toMatch('Starting test run...'); + test('exits with code 0 when running all tests and all pass', async () => { + const { stdout, stderr, code } = await run('with-passing-tests', 'staging'); - // Verify we have passing tests - const output = stdout + 
stderr; - expect(output).toMatch(/\d+ passed/); + expect(stdout).toMatch('Starting test run...'); - // Verify no tests failed - expect(output).not.toMatch(/[1-9]\d* failed/); + const output = stdout + stderr; + expect(output).toMatch(/\d+ passed/); + expect(output).not.toMatch(/[1-9]\d* failed/); - // Exit code should be 0 when all tests pass - expect(code).toBe(0); + expect(code).toBe(0); + }); }); }); }); diff --git a/test/test.test.js b/test/test.test.js deleted file mode 100644 index d2233be6..00000000 --- a/test/test.test.js +++ /dev/null @@ -1,612 +0,0 @@ -jest.mock('../lib/apiRequest', () => ({ - apiRequest: jest.fn() -})); - -require('dotenv').config(); - -const Gateway = require('../lib/proxy'); - -describe('Gateway test methods', () => { - const { apiRequest } = require('../lib/apiRequest'); - - beforeEach(() => { - apiRequest.mockReset(); - }); - - describe('test(name)', () => { - test('calls apiRequest with correct URL for single test (JS format)', async () => { - apiRequest.mockResolvedValue({ passed: 1, failed: 0, total: 1, tests: [] }); - - const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); - const result = await gateway.test('example_test'); - - expect(apiRequest).toHaveBeenCalledWith({ - method: 'GET', - uri: 'http://example.com/_tests/run.js?name=example_test', - formData: undefined, - json: true, - forever: undefined, - request: expect.any(Function) - }); - expect(result).toEqual({ passed: 1, failed: 0, total: 1, tests: [] }); - }); - - test('handles test with path in name', async () => { - apiRequest.mockResolvedValue({ passed: 1, failed: 0, total: 1 }); - - const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); - await gateway.test('test/examples/assertions_test'); - - expect(apiRequest).toHaveBeenCalledWith(expect.objectContaining({ - uri: 'http://example.com/_tests/run.js?name=test/examples/assertions_test' - })); - }); - }); - - describe('testRunAsync()', () => { - test('calls apiRequest with run_async endpoint (no .js extension for v1.1.0+)', async () => { - apiRequest.mockResolvedValue({ test_run_id: 'test-run-123' }); - - const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); - const result = await gateway.testRunAsync(); - - expect(apiRequest).toHaveBeenCalledWith({ - method: 'GET', - uri: 'http://example.com/_tests/run_async', - formData: undefined, - json: true, - forever: undefined, - request: expect.any(Function) - }); - expect(result).toEqual({ test_run_id: 'test-run-123' }); - }); - - test('handles error response', async () => { - apiRequest.mockResolvedValue({ error: 'Tests module not found' }); - - const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); - const result = await gateway.testRunAsync(); - - expect(result).toEqual({ error: 'Tests module not found' }); - }); - }); - - describe('formatTestLog', () => { - let formatTestLog; - - beforeEach(() => { - const testRunModule = require('../bin/pos-cli-test-run'); - formatTestLog = testRunModule.formatTestLog; - }); - - test('highlights test log with test path (new test indicator)', () => { - const logRow = { - message: '{"path": "app/lib/test/example_test.liquid"}', - type: 'liquid_test_abc123' - }; - - const result = formatTestLog(logRow, true); - - // Should contain the arrow indicator and path - expect(result).toContain('▶'); - expect(result).toContain('app/lib/test/example_test.liquid'); - }); - - test('displays 
test log without path normally', () => { - const logRow = { - message: 'Test assertion passed', - type: 'liquid_test_abc123' - }; - - const result = formatTestLog(logRow, true); - - // Should not contain the arrow indicator - expect(result).not.toContain('▶'); - expect(result).toContain('Test assertion passed'); - }); - - test('dims debug logs (type != test_name)', () => { - const logRow = { - message: 'Debug: checking variable value', - type: 'debug' - }; - - const result = formatTestLog(logRow, false); - - // Should contain the debug prefix with type - expect(result).toContain('[debug:debug]'); - expect(result).toContain('Debug: checking variable value'); - }); - - test('shows debug type in dimmed log output', () => { - const logRow = { - message: 'Custom debug message from test', - type: 'custom_debug' - }; - - const result = formatTestLog(logRow, false); - - expect(result).toContain('[debug:custom_debug]'); - expect(result).toContain('Custom debug message from test'); - }); - - test('highlights test path in modules directory', () => { - const logRow = { - message: '{"path": "modules/my_module/test/unit_test.liquid"}', - type: 'liquid_test_xyz789' - }; - - const result = formatTestLog(logRow, true); - - expect(result).toContain('▶'); - expect(result).toContain('modules/my_module/test/unit_test.liquid'); - }); - }); - - describe('TestLogStream', () => { - let TestLogStream; - - beforeEach(() => { - const testRunModule = require('../bin/pos-cli-test-run'); - TestLogStream = testRunModule.TestLogStream; - }); - - describe('parseJsonSummary', () => { - test('parses successful test completion JSON from tests module 1.1.1+', () => { - const stream = new TestLogStream({}); - - const message = JSON.stringify({ - success: true, - total_tests: 5, - total_assertions: 16, - total_errors: 0, - duration_ms: 26, - test_run_id: 'test-run-123', - tests: [ - { name: "test/array_test", success: true, assertions: 2, errors: {} }, - { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, - { name: "test/example_test", success: true, assertions: 5, errors: {} }, - { name: "test/object_test", success: true, assertions: 3, errors: {} }, - { name: "test/string_test", success: true, assertions: 2, errors: {} } - ] - }); - - const result = stream.parseJsonSummary(message); - - expect(result).toEqual({ - total: 5, - passed: 5, - failed: 0, - assertions: 16, - tests: [ - { name: "test/array_test", status: "passed", passed: true, assertions: 2 }, - { name: "test/examples/assertions_test", status: "passed", passed: true, assertions: 4 }, - { name: "test/example_test", status: "passed", passed: true, assertions: 5 }, - { name: "test/object_test", status: "passed", passed: true, assertions: 3 }, - { name: "test/string_test", status: "passed", passed: true, assertions: 2 } - ], - duration: 26 - }); - }); - - test('parses failed test completion JSON with error details', () => { - const stream = new TestLogStream({}); - - const message = JSON.stringify({ - success: false, - total_tests: 3, - total_assertions: 10, - total_errors: 1, - duration_ms: 45, - test_run_id: 'test-run-123', - tests: [ - { name: "test/passing_test", success: true, assertions: 3, errors: {} }, - { name: "test/failing_test", success: false, assertions: 2, errors: { expected: "field to be 2", actual: "field is 1" } }, - { name: "test/another_passing_test", success: true, assertions: 5, errors: {} } - ] - }); - - const result = stream.parseJsonSummary(message); - - expect(result).toEqual({ - total: 3, - passed: 2, - failed: 
1, - assertions: 10, - tests: [ - { name: "test/passing_test", status: "passed", passed: true, assertions: 3 }, - { name: "test/failing_test", status: "failed", passed: false, assertions: 2, error: "{\"expected\":\"field to be 2\",\"actual\":\"field is 1\"}" }, - { name: "test/another_passing_test", status: "passed", passed: true, assertions: 5 } - ], - duration: 45 - }); - }); - - test('handles alternative field names (total instead of total_tests)', () => { - const stream = new TestLogStream({}); - - const message = JSON.stringify({ - success: true, - total: 4, - assertions: 8, - duration: 30, - test_run_id: 'test-run-123', - tests: [ - { name: "test1", success: true, assertions: 2, errors: {} }, - { name: "test2", success: true, assertions: 2, errors: {} }, - { name: "test3", success: true, assertions: 2, errors: {} }, - { name: "test4", success: true, assertions: 2, errors: {} } - ] - }); - - const result = stream.parseJsonSummary(message); - - expect(result).toEqual({ - total: 4, - passed: 4, - failed: 0, - assertions: 8, - tests: [ - { name: "test1", status: "passed", passed: true, assertions: 2 }, - { name: "test2", status: "passed", passed: true, assertions: 2 }, - { name: "test3", status: "passed", passed: true, assertions: 2 }, - { name: "test4", status: "passed", passed: true, assertions: 2 } - ], - duration: 30 - }); - }); - - test('returns null for invalid JSON', () => { - const stream = new TestLogStream({}); - const invalidJson = '{ "invalid": json }'; - const result = stream.parseJsonSummary(invalidJson); - expect(result).toBeNull(); - }); - }); - - describe('isValidTestSummaryJson', () => { - test('identifies valid test summary JSON', () => { - const stream = new TestLogStream({}); - - const validMessage = JSON.stringify({ - success: true, - total_tests: 5, - total_assertions: 16, - duration_ms: 26, - test_run_id: 'test-run-123', - tests: [] - }); - - expect(stream.isValidTestSummaryJson(validMessage)).toBe(true); - }); - - test('rejects JSON without tests array', () => { - const stream = new TestLogStream({}); - - const invalidMessage = JSON.stringify({ - success: true, - total_tests: 5, - duration_ms: 26 - }); - - expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); - }); - - test('rejects JSON without success field', () => { - const stream = new TestLogStream({}); - - const invalidMessage = JSON.stringify({ - total_tests: 5, - duration_ms: 26, - tests: [] - }); - - expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); - }); - - test('rejects non-test JSON', () => { - const stream = new TestLogStream({}); - - const invalidMessage = JSON.stringify({ - path: "test/array_test" - }); - - expect(stream.isValidTestSummaryJson(invalidMessage)).toBe(false); - }); - }); - - describe('testCompleted event emission', () => { - test('emits testCompleted only once even when duplicate JSON summaries are received', () => { - const stream = new TestLogStream({}); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const testSummaryJson = JSON.stringify({ - success: true, - total_tests: 5, - total_assertions: 16, - duration_ms: 26, - test_run_id: 'test-run-123', - tests: [ - { name: "test/array_test", success: true, assertions: 2, errors: {} }, - { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, - { name: "test/example_test", success: true, assertions: 5, errors: {} }, - { name: "test/object_test", success: true, assertions: 3, errors: {} }, - { name: "test/string_test", success: true, assertions: 2, errors: {} } - ] - 
}); - - // Simulate receiving the same JSON summary multiple times - const logRow1 = { id: 1, message: testSummaryJson }; - const logRow2 = { id: 2, message: testSummaryJson }; - const logRow3 = { id: 3, message: testSummaryJson }; - - // Process first occurrence - stream.processLogMessage(logRow1); - // Process second occurrence (duplicate) - stream.processLogMessage(logRow2); - // Process third occurrence (duplicate) - stream.processLogMessage(logRow3); - - // Should emit testCompleted only once - expect(mockEmit).toHaveBeenCalledTimes(1); - expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); - - // Verify the emitted results are correct - const emittedResults = mockEmit.mock.calls[0][1]; - expect(emittedResults.total).toBe(5); - expect(emittedResults.passed).toBe(5); - expect(emittedResults.failed).toBe(0); - }); - - test('only processes JSON summaries that match the testRunId', () => { - const stream = new TestLogStream({}, 30000, 'test-run-123'); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const matchingSummaryJson = JSON.stringify({ - success: false, - total_tests: 2, - total_errors: 2, - test_run_id: 'test-run-123', - tests: [ - { name: "test1", success: false, assertions: 1, errors: { message: "failed" } }, - { name: "test2", success: false, assertions: 1, errors: { message: "failed" } } - ] - }); - - const nonMatchingSummaryJson = JSON.stringify({ - success: true, - total_tests: 2, - test_run_id: 'test-run-456', - tests: [ - { name: "test1", success: true, assertions: 1, errors: {} }, - { name: "test2", success: true, assertions: 1, errors: {} } - ] - }); - - // Process non-matching summary first (should be ignored) - stream.processLogMessage({ id: 1, message: nonMatchingSummaryJson }); - // Process matching summary (should be processed) - stream.processLogMessage({ id: 2, message: matchingSummaryJson }); - - // Should emit testCompleted only once for the matching summary - expect(mockEmit).toHaveBeenCalledTimes(1); - expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); - - // Verify the emitted results are from the matching (failing) summary - const emittedResults = mockEmit.mock.calls[0][1]; - expect(emittedResults.total).toBe(2); - expect(emittedResults.passed).toBe(0); - expect(emittedResults.failed).toBe(2); - }); - - test('ignores JSON summaries when no testRunId is set (backward compatibility)', () => { - const stream = new TestLogStream({}); // No testRunId - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const summaryWithIdJson = JSON.stringify({ - success: true, - total_tests: 1, - test_run_id: 'test-run-123', - tests: [{ name: "test1", success: true, assertions: 1, errors: {} }] - }); - - // Should still process summaries even with test_run_id when no filter is set - stream.processLogMessage({ id: 1, message: summaryWithIdJson }); - - expect(mockEmit).toHaveBeenCalledTimes(1); - expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); - }); - }); - - describe('testName filtering', () => { - test('detects test start with matching testName type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const startLog = { - id: 1, - message: 'Starting unit tests', - error_type: 'liquid_test_abc123' - }; - - stream.processLogMessage(startLog); - - expect(mockEmit).toHaveBeenCalledWith('testStarted'); - expect(stream.testStarted).toBe(true); - }); - - test('ignores test start with non-matching 
testName type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const startLog = { - id: 1, - message: 'Starting unit tests', - error_type: 'liquid_test_different' - }; - - stream.processLogMessage(startLog); - - expect(mockEmit).not.toHaveBeenCalled(); - expect(stream.testStarted).toBe(false); - }); - - test('detects test completion with testName SUMMARY type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - stream.testStarted = true; // Simulate test already started - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const summaryJson = JSON.stringify({ - success: true, - total_tests: 2, - tests: [ - { name: "test1", success: true, assertions: 1, errors: {} }, - { name: "test2", success: true, assertions: 1, errors: {} } - ] - }); - - const summaryLog = { - id: 2, - message: summaryJson, - error_type: 'liquid_test_abc123 SUMMARY' - }; - - stream.processLogMessage(summaryLog); - - expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); - expect(stream.completed).toBe(true); - }); - - test('ignores summary with non-matching testName SUMMARY type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - stream.testStarted = true; - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const summaryJson = JSON.stringify({ - success: true, - total_tests: 2, - tests: [ - { name: "test1", success: true, assertions: 1, errors: {} }, - { name: "test2", success: true, assertions: 1, errors: {} } - ] - }); - - const summaryLog = { - id: 2, - message: summaryJson, - error_type: 'liquid_test_different SUMMARY' - }; - - stream.processLogMessage(summaryLog); - - // Should emit testLog instead since it's during the test run - expect(mockEmit).toHaveBeenCalledWith('testLog', expect.any(Object), false); - expect(stream.completed).toBe(false); - }); - - test('emits testLog with isTestLog=true for logs with matching testName type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - stream.testStarted = true; - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const testLog = { - id: 2, - message: '{"path": "app/lib/test/example_test.liquid"}', - error_type: 'liquid_test_abc123' - }; - - stream.processLogMessage(testLog); - - expect(mockEmit).toHaveBeenCalledWith('testLog', testLog, true); - }); - - test('emits testLog with isTestLog=false for logs with different type (debug logs)', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - stream.testStarted = true; - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const debugLog = { - id: 2, - message: 'Debug: checking variable value', - error_type: 'debug' - }; - - stream.processLogMessage(debugLog); - - expect(mockEmit).toHaveBeenCalledWith('testLog', debugLog, false); - }); - - test('does not emit logs before test started', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const earlyLog = { - id: 1, - message: 'Some early log from previous test run', - error_type: 'liquid_test_abc123' - }; - - stream.processLogMessage(earlyLog); - - expect(mockEmit).not.toHaveBeenCalled(); - }); - - test('does not emit logs after test completed', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); - stream.testStarted = true; - stream.completed = true; - const mockEmit = jest.fn(); 
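// Illustrative sketch only (the helper name is hypothetical, not part of the suite):
// the rule the stream applies when deciding whether a log row is the completion
// summary — error_type must equal "<test_name> SUMMARY" and the message must parse
// as a summary payload with a tests array and a boolean success flag.
const looksLikeSummaryFor = (testName, row) => {
  if (row.error_type !== `${testName} SUMMARY`) return false;
  try {
    const obj = JSON.parse(row.message || '');
    return Array.isArray(obj.tests) && typeof obj.success === 'boolean';
  } catch (e) {
    return false;
  }
};
// e.g. looksLikeSummaryFor('liquid_test_abc123', summaryLog) holds for the
// summaryLog built below, and fails for rows tagged with any other error_type.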
- stream.emit = mockEmit; - - const lateLog = { - id: 3, - message: 'Some late log', - error_type: 'liquid_test_abc123' - }; - - stream.processLogMessage(lateLog); - - expect(mockEmit).not.toHaveBeenCalled(); - }); - - test('filters noise from past test runs by only processing logs with matching testName', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_current'); - stream.testStarted = true; - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - // Log from a past test run with different testName - const pastLog = { - id: 1, - message: 'Some past test log', - error_type: 'liquid_test_past' - }; - - // Log from current test run - const currentLog = { - id: 2, - message: 'Current test log', - error_type: 'liquid_test_current' - }; - - stream.processLogMessage(pastLog); - stream.processLogMessage(currentLog); - - // Should emit both logs, but with different isTestLog values - expect(mockEmit).toHaveBeenCalledTimes(2); - expect(mockEmit).toHaveBeenNthCalledWith(1, 'testLog', pastLog, false); // non-matching type = debug - expect(mockEmit).toHaveBeenNthCalledWith(2, 'testLog', currentLog, true); // matching type = test log - }); - }); - }); -}); \ No newline at end of file From b302a9f8505d56a2ff1d7717eb50e60647f35d74 Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Fri, 16 Jan 2026 14:01:54 +0100 Subject: [PATCH 8/9] extract logic to test-runner --- bin/pos-cli-test-run.js | 501 +--------------------------------- lib/test-runner/formatters.js | 136 +++++++++ lib/test-runner/index.js | 193 +++++++++++++ lib/test-runner/logStream.js | 153 +++++++++++ test/test-run.test.js | 107 +------- 5 files changed, 505 insertions(+), 585 deletions(-) create mode 100644 lib/test-runner/formatters.js create mode 100644 lib/test-runner/index.js create mode 100644 lib/test-runner/logStream.js diff --git a/bin/pos-cli-test-run.js b/bin/pos-cli-test-run.js index 1a2e0cb8..c0d89302 100755 --- a/bin/pos-cli-test-run.js +++ b/bin/pos-cli-test-run.js @@ -1,468 +1,8 @@ #!/usr/bin/env node -const EventEmitter = require('events'); const { program } = require('commander'); -const chalk = require('chalk'); -const Gateway = require('../lib/proxy'); const fetchAuthData = require('../lib/settings').fetchSettings; -const logger = require('../lib/logger'); - -const formatDuration = (ms) => { - if (ms < 1000) return `${ms}ms`; - if (ms < 60000) return `${(ms / 1000).toFixed(2)}s`; - const minutes = Math.floor(ms / 60000); - const seconds = ((ms % 60000) / 1000).toFixed(2); - return `${minutes}m ${seconds}s`; -}; - -class TestLogStream extends EventEmitter { - constructor(authData, timeout = 30000, testRunId = null, testName = null) { - super(); - this.authData = authData; - this.gateway = new Gateway(authData); - this.timeout = timeout; - this.testRunId = testRunId; - this.testName = testName; // The test_name from run_async.js (e.g., "liquid_test_xxxxx") - this.startTime = Date.now(); - this.testStarted = false; - this.completed = false; - this.messageBuffer = ''; // Buffer for multi-line messages - this.lastMessageTime = 0; - this.liquidTestSeen = false; - this.liquidTestTime = 0; - } - - isValidTestSummaryJson(message) { - try { - // Parse as JSON - const obj = JSON.parse(message); - - // Check if it has test summary structure - const hasTestsArray = Array.isArray(obj.tests); - const hasSuccessField = typeof obj.success === 'boolean'; - const hasTotalField = typeof obj.total_tests === 'number' || typeof obj.total === 'number'; - const hasDurationField = typeof obj.duration_ms === 
'number' || typeof obj.duration === 'number'; - - // If we have a testRunId, check that it matches - if (this.testRunId && obj.test_run_id !== this.testRunId) { - return false; - } - - return hasTestsArray && hasSuccessField && (hasTotalField || hasDurationField); - } catch (e) { - return false; - } - } - - start() { - this.intervalId = setInterval(() => this.fetchLogs(), 2000); - this.timeoutId = setTimeout(() => { - this.emit('timeout'); - this.stop(); - }, this.timeout); - - logger.Debug('Starting test log streaming...'); - } - - stop() { - if (this.intervalId) { - clearInterval(this.intervalId); - this.intervalId = null; - } - if (this.timeoutId) { - clearTimeout(this.timeoutId); - this.timeoutId = null; - } - } - - fetchLogs() { - this.gateway.logs({ lastId: this.lastId || 0 }) - .then((response) => { - const logs = response && response.logs; - if (!logs) return; - - for (let k in logs) { - const row = logs[k]; - - if (this.lastId && row.id <= this.lastId) continue; - this.lastId = row.id; - - logger.Debug(`[DEBUG] Processing log entry: ${JSON.stringify(row)}`); - this.processLogMessage(row); - } - }) - .catch(error => { - logger.Debug(`Error fetching logs: ${error.message}`); - }); - } - - processLogMessage(row) { - const message = row.message || ''; - const logType = row.error_type || ''; - const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); - const summaryType = this.testName ? `${this.testName} SUMMARY` : null; - - // Only process logs that are related to our test run - // If we have a testName, filter by it - if (this.testName) { - // Check for test start - look for "Starting unit tests" or "Starting test run" with matching type - if (logType === this.testName && (fullMessage.includes('Starting unit tests') || fullMessage.includes('Starting test run')) && !this.testStarted) { - this.testStarted = true; - this.emit('testStarted'); - return; // Don't emit this as a log - } - - // Check for test completion - look for log with type " SUMMARY" - if (!this.completed && logType === summaryType && this.isValidTestSummaryJson(fullMessage)) { - if (!this.liquidTestSeen) { - this.liquidTestSeen = true; - this.liquidTestTime = Date.now(); - } - - // Parse as JSON summary - const testResults = this.parseJsonSummary(fullMessage); - if (testResults) { - this.completed = true; - this.emit('testCompleted', testResults); - this.stop(); - return; - } - } - - // Only show logs after test started and before completion - if (this.testStarted && !this.completed) { - // Determine if this is a test log (type matches test_name) or debug log (any other type) - const isTestLog = logType === this.testName; - this.emit('testLog', row, isTestLog); - } - } else { - // Legacy behavior when testName is not available - // Check for test start - if (fullMessage.includes('Starting unit tests') && !this.testStarted) { - this.testStarted = true; - this.emit('testStarted'); - } - - // Check for test completion - look for the JSON summary format (tests module 1.1.1+) - if (!this.completed && this.isValidTestSummaryJson(fullMessage)) { - if (!this.liquidTestSeen) { - this.liquidTestSeen = true; - this.liquidTestTime = Date.now(); - } - - // Parse as JSON summary - const testResults = this.parseJsonSummary(fullMessage); - if (testResults) { - this.completed = true; - this.emit('testCompleted', testResults); - this.stop(); - return; - } - } - - // Also show individual test logs - if (this.testStarted && !this.completed) { - this.emit('testLog', row, true); - } - } - } - - 
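  // Illustrative helper (hypothetical name, not called anywhere): the pass/fail
  // arithmetic that parseJsonSummary below applies to the tests-module summary
  // fields, pulled out here only to make the mapping explicit.
  examplePassFailFromSummary({ success, total_tests = 0, total_errors = 0 }) {
    // failed comes straight from total_errors; passed is the full total on an
    // overall success, otherwise the remainder after subtracting failures.
    const failed = total_errors || 0;
    const passed = success === true ? total_tests : Math.max(0, total_tests - failed);
    return { passed, failed };
  }
  // e.g. { success: false, total_tests: 3, total_errors: 1 } → { passed: 2, failed: 1 }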
parseJsonSummary(message) { - try { - // Parse JSON (already validated as valid by isValidTestSummaryJson) - const summary = JSON.parse(message); - - // Map fields from tests module format to our internal format - const total = summary.total_tests || summary.total || 0; - const assertions = summary.total_assertions || summary.assertions || 0; - const duration = summary.duration_ms || summary.duration || 0; - - // Calculate passed/failed from success flag - let passed = 0; - let failed = 0; - - if (summary.success === true) { - passed = total; - failed = summary.total_errors || 0; - } else if (summary.success === false) { - failed = summary.total_errors || 0; - passed = Math.max(0, total - failed); - } - - // Map individual tests - const tests = []; - if (summary.tests && Array.isArray(summary.tests)) { - summary.tests.forEach(test => { - const testItem = { - name: test.name || 'Unknown test', - status: test.success ? 'passed' : 'failed', - passed: test.success, - assertions: test.assertions - }; - - // Handle errors - could be object with error details or array - // Check for array first since arrays are also objects in JavaScript - if (test.errors) { - if (Array.isArray(test.errors) && test.errors.length > 0) { - testItem.errors = test.errors; - } else if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { - testItem.error = JSON.stringify(test.errors); - } - } - - tests.push(testItem); - }); - } - - return { - total, - passed, - failed, - assertions, - tests, - duration - }; - } catch (error) { - logger.Debug(`[DEBUG] Failed to parse JSON summary: ${error.message}`); - return null; - } - } - - -} - -const printTestResults = (results, duration) => { - const { passed = 0, failed = 0, total = 0, tests = [] } = results; - - if (tests && tests.length > 0) { - logger.Info('\nTest Results:', { hideTimestamp: true }); - logger.Info('─'.repeat(60), { hideTimestamp: true }); - - tests.forEach(test => { - const status = test.status || (test.passed ? 'passed' : 'failed'); - const icon = status === 'passed' ? '✓' : '✗'; - const name = test.name || test.test_name || 'Unknown test'; - - if (status === 'passed') { - logger.Success(` ${icon} ${name}`, { hideTimestamp: true }); - } else { - logger.Error(` ${icon} ${name}`, { hideTimestamp: true, exit: false, notify: false }); - if (test.error || test.message) { - logger.Error(` Error: ${test.error || test.message}`, { hideTimestamp: true, exit: false, notify: false }); - } - if (test.errors && Array.isArray(test.errors)) { - test.errors.forEach(err => { - logger.Error(` - ${err.message || err}`, { hideTimestamp: true, exit: false, notify: false }); - }); - } - } - }); - - logger.Info('─'.repeat(60), { hideTimestamp: true }); - } - - // Print summary - const totalTests = total || (passed + failed); - const summary = []; - if (passed > 0) summary.push(`${passed} passed`); - if (failed > 0) summary.push(`${failed} failed`); - - const summaryText = summary.length > 0 ? summary.join(', ') : 'No tests executed'; - const durationText = duration ? 
` in ${formatDuration(duration)}` : ''; - - if (failed > 0) { - logger.Error(`\n${summaryText} (${totalTests} total)${durationText}`, { hideTimestamp: true, exit: false, notify: false }); - } else if (passed > 0) { - logger.Success(`\n${summaryText} (${totalTests} total)${durationText}`, { hideTimestamp: true }); - } else { - logger.Warn(`\n${summaryText}${durationText}`, { hideTimestamp: true }); - } - - return failed === 0; -}; - -const transformTestResponse = (response) => { - // Transform API response format to the format expected by printTestResults - // API returns: { success, total_tests, total_assertions, total_errors, duration_ms, tests: [{name, success, assertions, errors}] } - // printTestResults expects: { passed, failed, total, tests: [{name, status, passed, error}], duration } - - const total = response.total_tests || response.total || 0; - const totalErrors = response.total_errors || 0; - - let passed = 0; - let failed = 0; - - if (response.success === true) { - passed = total; - failed = totalErrors; - } else { - failed = totalErrors || (total > 0 ? 1 : 0); - passed = Math.max(0, total - failed); - } - - const tests = []; - if (response.tests && Array.isArray(response.tests)) { - response.tests.forEach(test => { - const testItem = { - name: test.name || 'Unknown test', - status: test.success ? 'passed' : 'failed', - passed: test.success, - assertions: test.assertions - }; - - // Handle errors - could be object with error details or array - if (test.errors) { - if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { - testItem.error = JSON.stringify(test.errors); - } else if (Array.isArray(test.errors) && test.errors.length > 0) { - testItem.errors = test.errors; - } - } - - tests.push(testItem); - }); - } - - return { - total, - passed, - failed, - assertions: response.total_assertions || 0, - tests, - duration: response.duration_ms || response.duration || 0 - }; -}; - -const runSingleTest = async (gateway, name) => { - const startTime = Date.now(); - - try { - const response = await gateway.test(name); - const duration = Date.now() - startTime; - - if (!response) { - logger.Error('No response received from test endpoint'); - return false; - } - - // Handle error response (not test failure, but actual error) - if (response.error && !response.tests) { - logger.Error(`Test error: ${response.error}`); - return false; - } - - // Handle the JSON response from /_tests/run.js - if (typeof response === 'object') { - const transformedResults = transformTestResponse(response); - return printTestResults(transformedResults, transformedResults.duration || duration); - } - - // Fallback for unexpected response format - logger.Print(JSON.stringify(response, null, 2)); - return true; - } catch (error) { - // Handle HTTP 500 errors that contain valid test results - // The test endpoint returns 500 when tests fail, but includes results in the body - const errorMessage = error.message || ''; - const jsonMatch = errorMessage.match(/^(\d+)\s*-\s*(\{.+\})$/); - - if (jsonMatch) { - try { - const response = JSON.parse(jsonMatch[2]); - if (response.tests && Array.isArray(response.tests)) { - const transformedResults = transformTestResponse(response); - return printTestResults(transformedResults, transformedResults.duration); - } - } catch (parseError) { - // Fall through to generic error handling - } - } - - logger.Error(`Failed to execute test: ${error.message}`); - return false; - } -}; - -const formatTestLog = (logRow, isTestLog) => { - const message = logRow.message || 
''; - const logType = logRow.error_type || ''; - const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); - const cleanMessage = fullMessage.replace(/\n$/, ''); - - // Check if this message contains a path to a test file - const hasTestPath = /app\/lib\/test\/|modules\/.*\/test\/|\.liquid/.test(cleanMessage); - - if (isTestLog && hasTestPath) { - // Test log with path - highlight it (new test indicator) - return chalk.cyan.bold(`▶ ${cleanMessage}`); - } else if (isTestLog) { - // Test log without path - normal display - return chalk.white(` ${cleanMessage}`); - } else { - // Debug log (type != test_name) - dim display - return chalk.dim(` [debug:${logType}] ${cleanMessage}`); - } -}; - -const runAllTests = async (gateway, authData) => { - return new Promise((resolve, reject) => { - let resolved = false; - - // Start the test run and get the test_run_id and test_name - logger.Info('Starting test run...'); - gateway.testRunAsync().then(testRunResponse => { - const testRunId = testRunResponse && testRunResponse.test_run_id; - const testName = testRunResponse && testRunResponse.test_name; - - logger.Debug(`Test run started with test_name: ${testName}`); - - const stream = new TestLogStream(authData, 180000, testRunId, testName); // 3 minute timeout for async tests - - const finish = (result) => { - if (resolved) return; // Prevent multiple resolutions - resolved = true; - stream.stop(); - resolve(result); - }; - - stream.on('testStarted', () => { - logger.Info('Test execution started...', { hideTimestamp: true }); - }); - - stream.on('testLog', (logRow, isTestLog) => { - // Display individual test logs with syntax highlighting - const message = logRow.message || ''; - const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); - - // Don't show JSON summary messages as logs - they will be processed as completion signals - if (stream.isValidTestSummaryJson(fullMessage)) { - return; - } - - // Format and display the log with appropriate highlighting - const formattedLog = formatTestLog(logRow, isTestLog); - console.log(formattedLog); - }); - - stream.on('testCompleted', (results) => { - logger.Info('Test execution completed, processing results...', { hideTimestamp: true }); - const success = printTestResults(results, results.duration); - finish(success); - }); - - stream.on('timeout', () => { - logger.Error('Test execution timed out - no completion message received within 3 minutes'); - finish(false); - }); - - // Start listening for logs - stream.start(); - }).catch(error => { - logger.Error(`Failed to start test execution: ${error.message}`); - resolve(false); - }); - }); -}; +const testRunner = require('../lib/test-runner'); program .name('pos-cli test run') @@ -470,37 +10,8 @@ program .argument('[name]', 'name of the test to execute (runs all tests if not provided)') .action(async (environment, name) => { const authData = fetchAuthData(environment, program); - const gateway = new Gateway(authData); - - try { - // Display the instance URL for clarity - logger.Info(`Running tests on: ${authData.url}`, { hideTimestamp: true }); - // First check if tests module is installed - const modules = await gateway.listModules(); - const hasTestsModule = modules.data && modules.data.some(module => module === 'tests'); - - if (!hasTestsModule) { - logger.Error(`Tests module not found. 
Please install the tests module: - pos-cli modules install tests - pos-cli deploy ${environment} -Then re-run the command.`); - process.exit(1); - } - - let success; - if (name) { - // Run single test with .js format - success = await runSingleTest(gateway, name); - } else { - // Run all tests via run_async with log streaming - success = await runAllTests(gateway, authData); - } - - process.exit(success ? 0 : 1); - } catch (error) { - logger.Error(`Failed to execute test: ${error.message}`); - process.exit(1); - } + const success = await testRunner.run(authData, environment, name); + process.exit(success ? 0 : 1); }); // Only parse arguments if this file is run directly, not when required for testing @@ -508,7 +19,11 @@ if (require.main === module) { program.parse(process.argv); } -// Export for testing +// Export for testing - re-export from lib modules +const { TestLogStream } = require('../lib/test-runner/logStream'); +const { formatDuration, formatTestLog, printTestResults } = require('../lib/test-runner/formatters'); +const { runAllTests } = require('../lib/test-runner'); + module.exports = { TestLogStream, formatDuration, diff --git a/lib/test-runner/formatters.js b/lib/test-runner/formatters.js new file mode 100644 index 00000000..48de4834 --- /dev/null +++ b/lib/test-runner/formatters.js @@ -0,0 +1,136 @@ +const chalk = require('chalk'); +const logger = require('../logger'); + +const formatDuration = (ms) => { + if (ms < 1000) return `${ms}ms`; + if (ms < 60000) return `${(ms / 1000).toFixed(2)}s`; + const minutes = Math.floor(ms / 60000); + const seconds = ((ms % 60000) / 1000).toFixed(2); + return `${minutes}m ${seconds}s`; +}; + +const formatTestLog = (logRow, isTestLog) => { + const message = logRow.message || ''; + const logType = logRow.error_type || ''; + const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); + const cleanMessage = fullMessage.replace(/\n$/, ''); + + // Check if this message contains a path to a test file + const hasTestPath = /app\/lib\/test\/|modules\/.*\/test\/|\.liquid/.test(cleanMessage); + + if (isTestLog && hasTestPath) { + // Test log with path - highlight it (new test indicator) + return chalk.cyan.bold(`▶ ${cleanMessage}`); + } else if (isTestLog) { + // Test log without path - normal display + return chalk.white(` ${cleanMessage}`); + } else { + // Debug log (type != test_name) - dim display + return chalk.dim(` [debug:${logType}] ${cleanMessage}`); + } +}; + +/** + * Transform API response format to internal format for printTestResults + * API returns: { success, total_tests, total_assertions, total_errors, duration_ms, tests: [{name, success, assertions, errors}] } + * Internal format: { passed, failed, total, tests: [{name, status, passed, error}], duration } + */ +const transformTestResponse = (response) => { + const total = response.total_tests || response.total || 0; + const totalErrors = response.total_errors || 0; + const assertions = response.total_assertions || response.assertions || 0; + const duration = response.duration_ms || response.duration || 0; + + let passed = 0; + let failed = 0; + + if (response.success === true) { + passed = total; + failed = totalErrors; + } else { + failed = totalErrors || (total > 0 ? 1 : 0); + passed = Math.max(0, total - failed); + } + + const tests = []; + if (response.tests && Array.isArray(response.tests)) { + response.tests.forEach(test => { + const testItem = { + name: test.name || 'Unknown test', + status: test.success ? 
'passed' : 'failed', + passed: test.success, + assertions: test.assertions + }; + + // Handle errors - could be object with error details or array + if (test.errors) { + if (Array.isArray(test.errors) && test.errors.length > 0) { + testItem.errors = test.errors; + } else if (typeof test.errors === 'object' && Object.keys(test.errors).length > 0) { + testItem.error = JSON.stringify(test.errors); + } + } + + tests.push(testItem); + }); + } + + return { total, passed, failed, assertions, tests, duration }; +}; + +const printTestResults = (results, duration) => { + const { passed = 0, failed = 0, total = 0, tests = [] } = results; + + if (tests && tests.length > 0) { + logger.Info('\nTest Results:', { hideTimestamp: true }); + logger.Info('─'.repeat(60), { hideTimestamp: true }); + + tests.forEach(test => { + const status = test.status || (test.passed ? 'passed' : 'failed'); + const icon = status === 'passed' ? '✓' : '✗'; + const name = test.name || test.test_name || 'Unknown test'; + + if (status === 'passed') { + logger.Success(` ${icon} ${name}`, { hideTimestamp: true }); + } else { + logger.Error(` ${icon} ${name}`, { hideTimestamp: true, exit: false, notify: false }); + if (test.error || test.message) { + logger.Error(` Error: ${test.error || test.message}`, { hideTimestamp: true, exit: false, notify: false }); + } + if (test.errors && Array.isArray(test.errors)) { + test.errors.forEach(err => { + logger.Error(` - ${err.message || err}`, { hideTimestamp: true, exit: false, notify: false }); + }); + } + } + }); + + logger.Info('─'.repeat(60), { hideTimestamp: true }); + } + + // Print summary + const totalTests = total || (passed + failed); + const summary = []; + if (passed > 0) summary.push(`${passed} passed`); + if (failed > 0) summary.push(`${failed} failed`); + + const summaryText = summary.length > 0 ? summary.join(', ') : 'No tests executed'; + const durationText = duration ? 
` in ${formatDuration(duration)}` : ''; + + if (failed > 0) { + logger.Error(`\n${summaryText} (${totalTests} total)${durationText}`, { hideTimestamp: true, exit: false, notify: false }); + } else if (passed > 0) { + logger.Success(`\n${summaryText} (${totalTests} total)${durationText}`, { hideTimestamp: true }); + } else { + logger.Warn(`\n${summaryText}${durationText}`, { hideTimestamp: true }); + } + + return failed === 0; +}; + +module.exports = { + formatDuration, + formatTestLog, + transformTestResponse, + printTestResults +}; diff --git a/lib/test-runner/index.js b/lib/test-runner/index.js new file mode 100644 index 00000000..2175a296 --- /dev/null +++ b/lib/test-runner/index.js @@ -0,0 +1,193 @@ +const Gateway = require('../proxy'); +const ServerError = require('../ServerError'); +const logger = require('../logger'); +const { TestLogStream } = require('./logStream'); +const { formatTestLog, transformTestResponse, printTestResults } = require('./formatters'); + +// Constants +const ASYNC_TEST_TIMEOUT_MS = 180000; // 3 minutes for async tests + +const runSingleTest = async (gateway, name) => { + const startTime = Date.now(); + + try { + const response = await gateway.test(name); + const duration = Date.now() - startTime; + + if (!response) { + logger.Error('No response received from test endpoint', { exit: false }); + return false; + } + + // Handle error response (not test failure, but actual error) + if (response.error && !response.tests) { + logger.Error(`Test error: ${response.error}`, { exit: false }); + return false; + } + + // Handle the JSON response from /_tests/run.js + if (typeof response === 'object') { + const transformedResults = transformTestResponse(response); + return printTestResults(transformedResults, transformedResults.duration || duration); + } + + // Fallback for unexpected response format + logger.Print(JSON.stringify(response, null, 2)); + return true; + } catch (error) { + // Handle HTTP 500 errors that contain valid test results + // The test endpoint returns 500 when tests fail, but includes results in the body + const errorMessage = error.message || ''; + const jsonMatch = errorMessage.match(/^(\d+)\s*-\s*(\{.+\})$/); + + if (jsonMatch) { + try { + const response = JSON.parse(jsonMatch[2]); + if (response.tests && Array.isArray(response.tests)) { + const transformedResults = transformTestResponse(response); + return printTestResults(transformedResults, transformedResults.duration); + } + } catch (parseError) { + // Fall through to generic error handling + } + } + + // Use ServerError for network errors + if (ServerError.isNetworkError(error)) { + ServerError.handler(error); + return false; + } + + logger.Error(`Failed to execute test: ${error.message}`, { exit: false }); + return false; + } +}; + +const runAllTests = async (gateway, authData) => { + return new Promise((resolve) => { + let resolved = false; + let stream = null; + + const finish = (result) => { + if (resolved) return; + resolved = true; + if (stream) { + stream.stop(); + } + resolve(result); + }; + + logger.Info('Starting test run...'); + + gateway.testRunAsync() + .then(testRunResponse => { + if (!testRunResponse) { + logger.Error('No response received from test endpoint', { exit: false }); + finish(false); + return; + } + + // Handle error response from testRunAsync + if (testRunResponse.error) { + logger.Error(`Test error: ${testRunResponse.error}`, { exit: false }); + finish(false); + return; + } + + const testName = testRunResponse.test_name; + + logger.Debug(`Test run started with 
test_name: ${testName}`); + + stream = new TestLogStream(authData, ASYNC_TEST_TIMEOUT_MS, testName); + + stream.on('testStarted', () => { + logger.Info('Test execution started...', { hideTimestamp: true }); + }); + + stream.on('testLog', (logRow, isTestLog) => { + const message = logRow.message || ''; + const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); + + // Don't show JSON summary messages as logs - they will be processed as completion signals + if (stream.isValidTestSummaryJson(fullMessage)) { + return; + } + + // Format and display the log with appropriate highlighting + const formattedLog = formatTestLog(logRow, isTestLog); + logger.Print(formattedLog); + }); + + stream.on('testCompleted', (results) => { + logger.Info('Test execution completed, processing results...', { hideTimestamp: true }); + const success = printTestResults(results, results.duration); + finish(success); + }); + + stream.on('timeout', () => { + logger.Error('Test execution timed out - no completion message received within 3 minutes', { exit: false }); + finish(false); + }); + + stream.start(); + }) + .catch(error => { + // Use ServerError for network errors + if (ServerError.isNetworkError(error)) { + ServerError.handler(error); + } else { + logger.Error(`Failed to start test execution: ${error.message}`, { exit: false }); + } + finish(false); + }); + }); +}; + +const checkTestsModule = async (gateway, environment) => { + try { + const modules = await gateway.listModules(); + const hasTestsModule = modules && modules.data && modules.data.some(module => module === 'tests'); + + if (!hasTestsModule) { + logger.Error(`Tests module not found. Please install the tests module: + pos-cli modules install tests + pos-cli deploy ${environment} +Then re-run the command.`, { exit: false }); + return false; + } + return true; + } catch (error) { + if (ServerError.isNetworkError(error)) { + ServerError.handler(error); + } else { + logger.Error(`Failed to check installed modules: ${error.message}`, { exit: false }); + } + return false; + } +}; + +const run = async (authData, environment, testName) => { + const gateway = new Gateway(authData); + + logger.Info(`Running tests on: ${authData.url}`, { hideTimestamp: true }); + + // First check if tests module is installed + const hasModule = await checkTestsModule(gateway, environment); + if (!hasModule) { + return false; + } + + if (testName) { + return runSingleTest(gateway, testName); + } else { + return runAllTests(gateway, authData); + } +}; + +module.exports = { + run, + runSingleTest, + runAllTests, + checkTestsModule, + ASYNC_TEST_TIMEOUT_MS +}; diff --git a/lib/test-runner/logStream.js b/lib/test-runner/logStream.js new file mode 100644 index 00000000..8a6b2f00 --- /dev/null +++ b/lib/test-runner/logStream.js @@ -0,0 +1,153 @@ +const EventEmitter = require('events'); +const Gateway = require('../proxy'); +const logger = require('../logger'); +const { transformTestResponse } = require('./formatters'); + +// Constants +const DEFAULT_TIMEOUT_MS = 30000; +const POLL_INTERVAL_MS = 2000; + +class TestLogStream extends EventEmitter { + constructor(authData, timeout = DEFAULT_TIMEOUT_MS, testName = null) { + super(); + this.authData = authData; + this.gateway = new Gateway(authData); + this.timeout = timeout; + this.testName = testName; + this.startTime = Date.now(); + this.testStarted = false; + this.completed = false; + this.lastId = 0; + this.intervalId = null; + this.timeoutId = null; + } + + isValidTestSummaryJson(message) { + try { + const obj 
= JSON.parse(message); + + const hasTestsArray = Array.isArray(obj.tests); + const hasSuccessField = typeof obj.success === 'boolean'; + const hasTotalField = typeof obj.total_tests === 'number' || typeof obj.total === 'number'; + const hasDurationField = typeof obj.duration_ms === 'number' || typeof obj.duration === 'number'; + + return hasTestsArray && hasSuccessField && (hasTotalField || hasDurationField); + } catch (e) { + return false; + } + } + + start() { + this.intervalId = setInterval(() => this.fetchLogs(), POLL_INTERVAL_MS); + this.timeoutId = setTimeout(() => { + this.emit('timeout'); + this.stop(); + }, this.timeout); + + logger.Debug('Starting test log streaming...'); + } + + stop() { + if (this.intervalId) { + clearInterval(this.intervalId); + this.intervalId = null; + } + if (this.timeoutId) { + clearTimeout(this.timeoutId); + this.timeoutId = null; + } + } + + async fetchLogs() { + try { + const response = await this.gateway.logs({ lastId: this.lastId || 0 }); + const logs = response && response.logs; + if (!logs) return; + + for (let k in logs) { + const row = logs[k]; + + if (this.lastId && row.id <= this.lastId) continue; + this.lastId = row.id; + + logger.Debug(`[DEBUG] Processing log entry: ${JSON.stringify(row)}`); + this.processLogMessage(row); + } + } catch (error) { + logger.Debug(`Error fetching logs: ${error.message}`); + } + } + + processLogMessage(row) { + const message = row.message || ''; + const logType = row.error_type || ''; + const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); + const summaryType = this.testName ? `${this.testName} SUMMARY` : null; + + // Only process logs that are related to our test run + // If we have a testName, filter by it + if (this.testName) { + // Check for test start - look for "Starting unit tests" or "Starting test run" with matching type + if (logType === this.testName && (fullMessage.includes('Starting unit tests') || fullMessage.includes('Starting test run')) && !this.testStarted) { + this.testStarted = true; + this.emit('testStarted'); + return; + } + + // Check for test completion - look for log with type " SUMMARY" + if (!this.completed && logType === summaryType && this.isValidTestSummaryJson(fullMessage)) { + const testResults = this.parseJsonSummary(fullMessage); + if (testResults) { + this.completed = true; + this.emit('testCompleted', testResults); + this.stop(); + return; + } + } + + // Only show logs after test started and before completion + if (this.testStarted && !this.completed) { + const isTestLog = logType === this.testName; + this.emit('testLog', row, isTestLog); + } + } else { + // Legacy behavior when testName is not available + if (fullMessage.includes('Starting unit tests') && !this.testStarted) { + this.testStarted = true; + this.emit('testStarted'); + } + + // Check for test completion - look for the JSON summary format (tests module 1.1.1+) + if (!this.completed && this.isValidTestSummaryJson(fullMessage)) { + const testResults = this.parseJsonSummary(fullMessage); + if (testResults) { + this.completed = true; + this.emit('testCompleted', testResults); + this.stop(); + return; + } + } + + // Also show individual test logs + if (this.testStarted && !this.completed) { + this.emit('testLog', row, true); + } + } + } + + parseJsonSummary(message) { + try { + const summary = JSON.parse(message); + return transformTestResponse(summary); + } catch (error) { + logger.Debug(`[DEBUG] Failed to parse JSON summary: ${error.message}`); + return null; + } + } +} + +module.exports = 
{ + TestLogStream, + DEFAULT_TIMEOUT_MS, + POLL_INTERVAL_MS +}; diff --git a/test/test-run.test.js b/test/test-run.test.js index 682b7fd8..1483d15d 100644 --- a/test/test-run.test.js +++ b/test/test-run.test.js @@ -10,6 +10,10 @@ const exec = require('./utils/exec'); const cliPath = require('./utils/cliPath'); const Gateway = require('../lib/proxy'); +// Import test-runner modules +const { TestLogStream } = require('../lib/test-runner/logStream'); +const { formatDuration, formatTestLog } = require('../lib/test-runner/formatters'); + const cwd = name => `${process.cwd()}/test/fixtures/test/${name}`; const run = (fixtureName, options) => exec(`${cliPath} test run ${options || ''}`, { cwd: cwd(fixtureName), env: process.env }); const deploy = (fixtureName) => exec(`${cliPath} deploy staging`, { cwd: cwd(fixtureName), env: process.env }); @@ -56,7 +60,7 @@ describe('pos-cli test run', () => { describe('Gateway.testRunAsync()', () => { test('calls apiRequest with run_async endpoint (no .js extension for v1.1.0+)', async () => { - apiRequest.mockResolvedValue({ test_run_id: 'test-run-123' }); + apiRequest.mockResolvedValue({ test_name: 'liquid_test_abc123' }); const gateway = new Gateway({ url: 'http://example.com', token: '1234', email: 'test@example.com' }); const result = await gateway.testRunAsync(); @@ -69,7 +73,7 @@ describe('pos-cli test run', () => { forever: undefined, request: expect.any(Function) }); - expect(result).toEqual({ test_run_id: 'test-run-123' }); + expect(result).toEqual({ test_name: 'liquid_test_abc123' }); }); test('handles error response', async () => { @@ -83,13 +87,6 @@ describe('pos-cli test run', () => { }); describe('formatTestLog', () => { - let formatTestLog; - - beforeEach(() => { - const testRunModule = require('../bin/pos-cli-test-run'); - formatTestLog = testRunModule.formatTestLog; - }); - test('highlights test log with test path (new test indicator)', () => { const logRow = { message: '{"path": "app/lib/test/example_test.liquid"}', @@ -176,13 +173,6 @@ describe('pos-cli test run', () => { }); describe('formatDuration', () => { - let formatDuration; - - beforeEach(() => { - const testRunModule = require('../bin/pos-cli-test-run'); - formatDuration = testRunModule.formatDuration; - }); - test('formats milliseconds under 1 second', () => { expect(formatDuration(0)).toBe('0ms'); expect(formatDuration(1)).toBe('1ms'); @@ -211,13 +201,6 @@ describe('pos-cli test run', () => { }); describe('TestLogStream', () => { - let TestLogStream; - - beforeEach(() => { - const testRunModule = require('../bin/pos-cli-test-run'); - TestLogStream = testRunModule.TestLogStream; - }); - describe('parseJsonSummary', () => { test('parses successful test completion JSON from tests module 1.1.1+', () => { const stream = new TestLogStream({}); @@ -228,7 +211,6 @@ describe('pos-cli test run', () => { total_assertions: 16, total_errors: 0, duration_ms: 26, - test_run_id: 'test-run-123', tests: [ { name: "test/array_test", success: true, assertions: 2, errors: {} }, { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, @@ -265,7 +247,6 @@ describe('pos-cli test run', () => { total_assertions: 10, total_errors: 1, duration_ms: 45, - test_run_id: 'test-run-123', tests: [ { name: "test/passing_test", success: true, assertions: 3, errors: {} }, { name: "test/failing_test", success: false, assertions: 2, errors: { expected: "field to be 2", actual: "field is 1" } }, @@ -297,7 +278,6 @@ describe('pos-cli test run', () => { total: 4, assertions: 8, duration: 30, - 
test_run_id: 'test-run-123', tests: [ { name: "test1", success: true, assertions: 2, errors: {} }, { name: "test2", success: true, assertions: 2, errors: {} }, @@ -422,7 +402,6 @@ describe('pos-cli test run', () => { total_tests: 5, total_assertions: 16, duration_ms: 26, - test_run_id: 'test-run-123', tests: [] }); @@ -475,7 +454,6 @@ describe('pos-cli test run', () => { total_tests: 5, total_assertions: 16, duration_ms: 26, - test_run_id: 'test-run-123', tests: [ { name: "test/array_test", success: true, assertions: 2, errors: {} }, { name: "test/examples/assertions_test", success: true, assertions: 4, errors: {} }, @@ -502,66 +480,11 @@ describe('pos-cli test run', () => { expect(emittedResults.failed).toBe(0); }); - test('only processes JSON summaries that match the testRunId', () => { - const stream = new TestLogStream({}, 30000, 'test-run-123'); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const matchingSummaryJson = JSON.stringify({ - success: false, - total_tests: 2, - total_errors: 2, - test_run_id: 'test-run-123', - tests: [ - { name: "test1", success: false, assertions: 1, errors: { message: "failed" } }, - { name: "test2", success: false, assertions: 1, errors: { message: "failed" } } - ] - }); - - const nonMatchingSummaryJson = JSON.stringify({ - success: true, - total_tests: 2, - test_run_id: 'test-run-456', - tests: [ - { name: "test1", success: true, assertions: 1, errors: {} }, - { name: "test2", success: true, assertions: 1, errors: {} } - ] - }); - - stream.processLogMessage({ id: 1, message: nonMatchingSummaryJson, error_type: '' }); - stream.processLogMessage({ id: 2, message: matchingSummaryJson, error_type: '' }); - - expect(mockEmit).toHaveBeenCalledTimes(1); - expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); - - const emittedResults = mockEmit.mock.calls[0][1]; - expect(emittedResults.total).toBe(2); - expect(emittedResults.passed).toBe(0); - expect(emittedResults.failed).toBe(2); - }); - - test('ignores JSON summaries when no testRunId is set (backward compatibility)', () => { - const stream = new TestLogStream({}); - const mockEmit = jest.fn(); - stream.emit = mockEmit; - - const summaryWithIdJson = JSON.stringify({ - success: true, - total_tests: 1, - test_run_id: 'test-run-123', - tests: [{ name: "test1", success: true, assertions: 1, errors: {} }] - }); - - stream.processLogMessage({ id: 1, message: summaryWithIdJson, error_type: '' }); - - expect(mockEmit).toHaveBeenCalledTimes(1); - expect(mockEmit).toHaveBeenCalledWith('testCompleted', expect.any(Object)); - }); }); describe('testName filtering', () => { test('detects test start with matching testName type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -578,7 +501,7 @@ describe('pos-cli test run', () => { }); test('ignores test start with non-matching testName type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -595,7 +518,7 @@ describe('pos-cli test run', () => { }); test('detects test completion with testName SUMMARY type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); stream.testStarted = true; const mockEmit = jest.fn(); stream.emit = 
mockEmit; @@ -622,7 +545,7 @@ describe('pos-cli test run', () => { }); test('ignores summary with non-matching testName SUMMARY type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); stream.testStarted = true; const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -649,7 +572,7 @@ describe('pos-cli test run', () => { }); test('emits testLog with isTestLog=true for logs with matching testName type', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); stream.testStarted = true; const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -666,7 +589,7 @@ describe('pos-cli test run', () => { }); test('emits testLog with isTestLog=false for logs with different type (debug logs)', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); stream.testStarted = true; const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -683,7 +606,7 @@ describe('pos-cli test run', () => { }); test('does not emit logs before test started', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -699,7 +622,7 @@ describe('pos-cli test run', () => { }); test('does not emit logs after test completed', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_abc123'); + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); stream.testStarted = true; stream.completed = true; const mockEmit = jest.fn(); @@ -717,7 +640,7 @@ describe('pos-cli test run', () => { }); test('filters noise from past test runs by only processing logs with matching testName', () => { - const stream = new TestLogStream({}, 30000, null, 'liquid_test_current'); + const stream = new TestLogStream({}, 30000, 'liquid_test_current'); stream.testStarted = true; const mockEmit = jest.fn(); stream.emit = mockEmit; From 26a3bdf3db2e10ccadb48b62f24f370726de2e6d Mon Sep 17 00:00:00 2001 From: Maciej Krajowski-Kukiel Date: Fri, 16 Jan 2026 15:01:14 +0100 Subject: [PATCH 9/9] fix pos-cli test run --- lib/test-runner/formatters.js | 6 +-- lib/test-runner/index.js | 77 ++++++++++++++++------------ lib/test-runner/logStream.js | 19 +++++-- test/test-run.test.js | 94 ++++++++++++++++++++++++++++++++++- 4 files changed, 154 insertions(+), 42 deletions(-) diff --git a/lib/test-runner/formatters.js b/lib/test-runner/formatters.js index 48de4834..fcd173f6 100644 --- a/lib/test-runner/formatters.js +++ b/lib/test-runner/formatters.js @@ -20,13 +20,13 @@ const formatTestLog = (logRow, isTestLog) => { if (isTestLog && hasTestPath) { // Test log with path - highlight it (new test indicator) - return chalk.cyan.bold(`▶ ${cleanMessage}`); + return chalk.cyan.bold(`▶ ${cleanMessage}\n`); } else if (isTestLog) { // Test log without path - normal display - return chalk.white(` ${cleanMessage}`); + return chalk.white(` ${cleanMessage}\n`); } else { // Debug log (type != test_name) - dim display - return chalk.dim(` [debug:${logType}] ${cleanMessage}`); + return chalk.dim(` [debug:${logType}] ${cleanMessage}\n`); } }; diff --git a/lib/test-runner/index.js b/lib/test-runner/index.js index 2175a296..b2bc4eb6 100644 --- a/lib/test-runner/index.js +++ b/lib/test-runner/index.js @@ -67,6 +67,7 @@ const runAllTests = 
async (gateway, authData) => { return new Promise((resolve) => { let resolved = false; let stream = null; + let testName = null; const finish = (result) => { if (resolved) return; @@ -79,6 +80,46 @@ const runAllTests = async (gateway, authData) => { logger.Info('Starting test run...'); + // Create stream first so we don't miss any logs + stream = new TestLogStream(authData, ASYNC_TEST_TIMEOUT_MS, null); + + stream.on('testStarted', () => { + logger.Info('Test execution started...', { hideTimestamp: true }); + }); + + stream.on('testLog', (logRow, isTestLog) => { + const message = logRow.message || ''; + const logType = logRow.error_type || ''; + const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); + // Use stream.testName to handle logs that arrive before testRunAsync completes + const summaryType = stream.testName ? `${stream.testName} SUMMARY` : null; + + // Don't show JSON summary messages as logs - they will be processed as completion signals + // Only skip if error_type matches expected summary type to avoid filtering user logs + const isSummaryLog = summaryType && logType === summaryType && stream.isValidTestSummaryJson(fullMessage); + if (isSummaryLog) { + return; + } + + // Format and display the log with appropriate highlighting + const formattedLog = formatTestLog(logRow, isTestLog); + logger.Print(formattedLog); + }); + + stream.on('testCompleted', (results) => { + logger.Info('Test execution completed, processing results...', { hideTimestamp: true }); + const success = printTestResults(results, results.duration); + finish(success); + }); + + stream.on('timeout', () => { + logger.Error('Test execution timed out - no completion message received within 3 minutes', { exit: false }); + finish(false); + }); + + // Start listening for logs before starting the test + stream.start(); + gateway.testRunAsync() .then(testRunResponse => { if (!testRunResponse) { @@ -94,42 +135,12 @@ const runAllTests = async (gateway, authData) => { return; } - const testName = testRunResponse.test_name; + testName = testRunResponse.test_name; logger.Debug(`Test run started with test_name: ${testName}`); - stream = new TestLogStream(authData, ASYNC_TEST_TIMEOUT_MS, testName); - - stream.on('testStarted', () => { - logger.Info('Test execution started...', { hideTimestamp: true }); - }); - - stream.on('testLog', (logRow, isTestLog) => { - const message = logRow.message || ''; - const fullMessage = typeof message === 'string' ? 
message : JSON.stringify(message); - - // Don't show JSON summary messages as logs - they will be processed as completion signals - if (stream.isValidTestSummaryJson(fullMessage)) { - return; - } - - // Format and display the log with appropriate highlighting - const formattedLog = formatTestLog(logRow, isTestLog); - logger.Print(formattedLog); - }); - - stream.on('testCompleted', (results) => { - logger.Info('Test execution completed, processing results...', { hideTimestamp: true }); - const success = printTestResults(results, results.duration); - finish(success); - }); - - stream.on('timeout', () => { - logger.Error('Test execution timed out - no completion message received within 3 minutes', { exit: false }); - finish(false); - }); - - stream.start(); + // Update stream with the test name for filtering + stream.testName = testName; }) .catch(error => { // Use ServerError for network errors diff --git a/lib/test-runner/logStream.js b/lib/test-runner/logStream.js index 8a6b2f00..42010ca9 100644 --- a/lib/test-runner/logStream.js +++ b/lib/test-runner/logStream.js @@ -84,6 +84,10 @@ class TestLogStream extends EventEmitter { const fullMessage = typeof message === 'string' ? message : JSON.stringify(message); const summaryType = this.testName ? `${this.testName} SUMMARY` : null; + // Check if this is a test path log + const cleanMessage = fullMessage.replace(/\n$/, ''); + const hasTestPath = /app\/lib\/test\/|modules\/.*\/test\/|\.liquid/.test(cleanMessage); + // Only process logs that are related to our test run // If we have a testName, filter by it if (this.testName) { @@ -105,10 +109,17 @@ class TestLogStream extends EventEmitter { } } - // Only show logs after test started and before completion + // Show all logs after test started and before completion (including user logs) if (this.testStarted && !this.completed) { - const isTestLog = logType === this.testName; - this.emit('testLog', row, isTestLog); + this.emit('testLog', row, logType === this.testName); + } + + // Handle case where test framework doesn't emit "Starting unit tests" log + // If we see a test path log with matching error_type, treat it as test start + if (!this.testStarted && !this.completed && logType === this.testName && hasTestPath) { + this.testStarted = true; + this.emit('testStarted'); + this.emit('testLog', row, true); } } else { // Legacy behavior when testName is not available @@ -128,7 +139,7 @@ class TestLogStream extends EventEmitter { } } - // Also show individual test logs + // Show all logs after test started and before completion (including user logs) if (this.testStarted && !this.completed) { this.emit('testLog', row, true); } diff --git a/test/test-run.test.js b/test/test-run.test.js index 1483d15d..a0cfe89a 100644 --- a/test/test-run.test.js +++ b/test/test-run.test.js @@ -155,10 +155,43 @@ describe('pos-cli test run', () => { const result = formatTestLog(logRow, true); - expect(result).not.toMatch(/\n$/); + expect(result).toMatch(/\n$/); expect(result).toContain('Test message with newline'); }); + test('formatTestLog output ends with newline for test path messages', () => { + const logRow = { + message: '{"path": "app/lib/test/example_test.liquid"}', + error_type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + + expect(result).toMatch(/\n$/); + }); + + test('formatTestLog output ends with newline for regular test logs', () => { + const logRow = { + message: 'Test assertion passed', + error_type: 'liquid_test_abc123' + }; + + const result = formatTestLog(logRow, true); + 
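      // For reference (mirrors lib/test-runner/formatters.js after this patch):
      // every formatTestLog variant now ends with "\n" —
      //   test log containing a test path → cyan  `▶ <message>\n`
      //   other test log                  → white `  <message>\n`
      //   non-matching error_type (debug) → dim   `  [debug:<type>] <message>\n`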
+ expect(result).toMatch(/\n$/); + }); + + test('formatTestLog output ends with newline for debug logs', () => { + const logRow = { + message: 'Debug: checking variable value', + error_type: 'debug' + }; + + const result = formatTestLog(logRow, false); + + expect(result).toMatch(/\n$/); + }); + test('handles non-string message by converting to JSON', () => { const logRow = { message: { key: 'value', nested: { data: true } }, @@ -605,7 +638,26 @@ describe('pos-cli test run', () => { expect(mockEmit).toHaveBeenCalledWith('testLog', debugLog, false); }); - test('does not emit logs before test started', () => { + test('emits user log with JSON content that matches test summary structure', () => { + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + // User log that accidentally has JSON structure similar to test summary + const userLogWithJson = { + id: 2, + message: JSON.stringify({ success: true, total: 5, data: 'user content' }), + error_type: 'world' + }; + + stream.processLogMessage(userLogWithJson); + + // Should be emitted as testLog because error_type is 'world', not 'liquid_test_abc123 SUMMARY' + expect(mockEmit).toHaveBeenCalledWith('testLog', userLogWithJson, false); + }); + + test('does not emit logs before test started (non-test-path logs)', () => { const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); const mockEmit = jest.fn(); stream.emit = mockEmit; @@ -618,9 +670,29 @@ describe('pos-cli test run', () => { stream.processLogMessage(earlyLog); + // Non-test-path logs should not be emitted before testStarted expect(mockEmit).not.toHaveBeenCalled(); }); + test('emits test path logs even before testStarted flag is set', () => { + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + // First log with test path should be emitted even if testStarted is false + // This handles cases where test framework doesn't emit "Starting unit tests" log + const testPathLog = { + id: 1, + message: '{"path":"modules/community/test/commands/can_profiles_test"}', + error_type: 'liquid_test_abc123' + }; + + stream.processLogMessage(testPathLog); + + // Test path log should be emitted to mark test start + expect(mockEmit).toHaveBeenCalledWith('testLog', testPathLog, true); + }); + test('does not emit logs after test completed', () => { const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); stream.testStarted = true; @@ -639,6 +711,24 @@ describe('pos-cli test run', () => { expect(mockEmit).not.toHaveBeenCalled(); }); + test('emits user logs with custom error_type (not testName) as testLog events', () => { + const stream = new TestLogStream({}, 30000, 'liquid_test_abc123'); + stream.testStarted = true; + const mockEmit = jest.fn(); + stream.emit = mockEmit; + + const userLog = { + id: 2, + message: 'hello', + error_type: 'world' + }; + + stream.processLogMessage(userLog); + + // User log should be emitted as testLog with isTestLog=false (it's not from test framework) + expect(mockEmit).toHaveBeenCalledWith('testLog', userLog, false); + }); + test('filters noise from past test runs by only processing logs with matching testName', () => { const stream = new TestLogStream({}, 30000, 'liquid_test_current'); stream.testStarted = true;
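      // For context (sketch of the rule under test, mirroring logStream.js):
      // once the run has started every row is forwarded as a testLog event, and
      // isTestLog is simply an error_type match against the current test name,
      // so rows left over from earlier runs render as dim debug output.
      // Hypothetical helper, not used by the suite:
      //   const isTestLogFor = (streamTestName, row) => row.error_type === streamTestName;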