From 396be251fb691e6a663d8c47bdaa46510b05e19a Mon Sep 17 00:00:00 2001 From: ttt161 Date: Sun, 31 Aug 2025 06:55:04 +0300 Subject: [PATCH 1/5] remove machinegun specific --- .env | 2 - Dockerfile.dev | 6 - benchmark/base_bench/.env | 5 - benchmark/base_bench/.gitignore | 19 -- benchmark/base_bench/.image.dev | 0 benchmark/base_bench/Dockerfile.dev | 17 -- benchmark/base_bench/LICENSE | 191 ------------------ benchmark/base_bench/Makefile | 119 ----------- benchmark/base_bench/README.md | 8 - benchmark/base_bench/config/sys.config | 55 ----- benchmark/base_bench/docker-compose.yml | 46 ----- benchmark/base_bench/rebar.config | 9 - benchmark/base_bench/src/base_bench.app.src | 16 -- benchmark/base_bench/src/base_bench.erl | 83 -------- benchmark/base_bench/src/base_bench_app.erl | 18 -- .../base_bench/src/base_bench_processor.erl | 45 ----- benchmark/base_bench/src/base_bench_sup.erl | 35 ---- config/sys.config | 17 -- docker-compose.yml | 45 ----- rebar.config | 5 +- rebar.lock | 21 +- src/prg_notifier.erl | 184 ----------------- src/prg_worker.erl | 72 ++----- src/prg_worker_sidecar.erl | 31 --- src/progressor.app.src | 5 +- src/progressor_app.erl | 7 - test/prg_ct_hook.erl | 42 ---- 27 files changed, 25 insertions(+), 1078 deletions(-) delete mode 100644 benchmark/base_bench/.env delete mode 100644 benchmark/base_bench/.gitignore delete mode 100644 benchmark/base_bench/.image.dev delete mode 100644 benchmark/base_bench/Dockerfile.dev delete mode 100644 benchmark/base_bench/LICENSE delete mode 100644 benchmark/base_bench/Makefile delete mode 100644 benchmark/base_bench/README.md delete mode 100644 benchmark/base_bench/config/sys.config delete mode 100644 benchmark/base_bench/docker-compose.yml delete mode 100644 benchmark/base_bench/rebar.config delete mode 100644 benchmark/base_bench/src/base_bench.app.src delete mode 100644 benchmark/base_bench/src/base_bench.erl delete mode 100644 benchmark/base_bench/src/base_bench_app.erl delete mode 100644 benchmark/base_bench/src/base_bench_processor.erl delete mode 100644 benchmark/base_bench/src/base_bench_sup.erl delete mode 100644 src/prg_notifier.erl diff --git a/.env b/.env index 6233462..7be13a7 100644 --- a/.env +++ b/.env @@ -1,5 +1,3 @@ SERVICE_NAME=progressor OTP_VERSION=27.1.2 REBAR_VERSION=3.24 -THRIFT_VERSION=0.14.2.3 -CONFLUENT_PLATFORM_VERSION=7.2.15 diff --git a/Dockerfile.dev b/Dockerfile.dev index e4cfa53..6712d9a 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -3,12 +3,6 @@ ARG OTP_VERSION FROM docker.io/library/erlang:${OTP_VERSION} SHELL ["/bin/bash", "-o", "pipefail", "-c"] -# Install thrift compiler -ARG THRIFT_VERSION -ARG TARGETARCH -RUN wget -q -O- "https://github.com/valitydev/thrift/releases/download/${THRIFT_VERSION}/thrift-${THRIFT_VERSION}-linux-${TARGETARCH}.tar.gz" \ - | tar -xvz -C /usr/local/bin/ - # Set env ENV CHARSET=UTF-8 ENV LANG=C.UTF-8 diff --git a/benchmark/base_bench/.env b/benchmark/base_bench/.env deleted file mode 100644 index 52fdb79..0000000 --- a/benchmark/base_bench/.env +++ /dev/null @@ -1,5 +0,0 @@ -SERVICE_NAME=progressor -OTP_VERSION=25.3 -REBAR_VERSION=3.18 -THRIFT_VERSION=0.14.2.3 -CONFLUENT_PLATFORM_VERSION=5.1.2 diff --git a/benchmark/base_bench/.gitignore b/benchmark/base_bench/.gitignore deleted file mode 100644 index f1c4554..0000000 --- a/benchmark/base_bench/.gitignore +++ /dev/null @@ -1,19 +0,0 @@ -.rebar3 -_* -.eunit -*.o -*.beam -*.plt -*.swp -*.swo -.erlang.cookie -ebin -log -erl_crash.dump -.rebar -logs -_build -.idea -*.iml -rebar3.crashdump -*~ diff --git 
a/benchmark/base_bench/.image.dev b/benchmark/base_bench/.image.dev deleted file mode 100644 index e69de29..0000000 diff --git a/benchmark/base_bench/Dockerfile.dev b/benchmark/base_bench/Dockerfile.dev deleted file mode 100644 index 4137dde..0000000 --- a/benchmark/base_bench/Dockerfile.dev +++ /dev/null @@ -1,17 +0,0 @@ -ARG OTP_VERSION - -FROM docker.io/library/erlang:${OTP_VERSION} -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -# Install thrift compiler -ARG THRIFT_VERSION -ARG TARGETARCH=amd64 -RUN wget -q -O- "https://github.com/valitydev/thrift/releases/download/${THRIFT_VERSION}/thrift-${THRIFT_VERSION}-linux-${TARGETARCH}.tar.gz" \ - | tar -xvz -C /usr/local/bin/ - -# Set env -ENV CHARSET=UTF-8 -ENV LANG=C.UTF-8 - -# Set runtime -CMD ["/bin/bash"] \ No newline at end of file diff --git a/benchmark/base_bench/LICENSE b/benchmark/base_bench/LICENSE deleted file mode 100644 index 071b569..0000000 --- a/benchmark/base_bench/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2024, Anonymous . 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/benchmark/base_bench/Makefile b/benchmark/base_bench/Makefile deleted file mode 100644 index 48f0473..0000000 --- a/benchmark/base_bench/Makefile +++ /dev/null @@ -1,119 +0,0 @@ -# HINT -# Use this file to override variables here. -# For example, to run with podman put `DOCKER=podman` there. --include Makefile.env - -# NOTE -# Variables specified in `.env` file are used to pick and setup specific -# component versions, both when building a development image and when running -# CI workflows on GH Actions. This ensures that tasks run with `wc-` prefix -# (like `wc-dialyze`) are reproducible between local machine and CI runners. -DOTENV := $(shell grep -v '^\#' .env) - -# Development images -DEV_IMAGE_TAG = $(TEST_CONTAINER_NAME)-dev -DEV_IMAGE_ID = $(file < .image.dev) - -DOCKER ?= docker -DOCKERCOMPOSE ?= docker-compose -DOCKERCOMPOSE_W_ENV = DEV_IMAGE_TAG=$(DEV_IMAGE_TAG) $(DOCKERCOMPOSE) -f docker-compose.yml -REBAR ?= rebar3 -TEST_CONTAINER_NAME ?= testrunner - -all: compile - -.PHONY: dev-image clean-dev-image wc-shell test - -dev-image: .image.dev - -get-submodules: - git submodule init - git submodule update - -.image.dev: get-submodules Dockerfile.dev .env - env $(DOTENV) $(DOCKERCOMPOSE_W_ENV) build $(TEST_CONTAINER_NAME) - $(DOCKER) image ls -q -f "reference=$(DEV_IMAGE_ID)" | head -n1 > $@ - -clean-dev-image: -ifneq ($(DEV_IMAGE_ID),) - $(DOCKER) image rm -f $(DEV_IMAGE_TAG) - rm .image.dev -endif - -DOCKER_WC_OPTIONS := -v $(PWD):$(PWD) --workdir $(PWD) -DOCKER_WC_EXTRA_OPTIONS ?= --rm -DOCKER_RUN = $(DOCKER) run -t $(DOCKER_WC_OPTIONS) $(DOCKER_WC_EXTRA_OPTIONS) - -DOCKERCOMPOSE_RUN = $(DOCKERCOMPOSE_W_ENV) run --rm $(DOCKER_WC_OPTIONS) - -# Utility tasks - -wc-shell: dev-image - $(DOCKER_RUN) --interactive --tty $(DEV_IMAGE_TAG) - -wc-%: dev-image - $(DOCKER_RUN) $(DEV_IMAGE_TAG) make $* - -wdeps-shell: dev-image - $(DOCKERCOMPOSE_RUN) $(TEST_CONTAINER_NAME) su; \ - $(DOCKERCOMPOSE_W_ENV) down - -wdeps-%: dev-image - $(DOCKERCOMPOSE_RUN) -T $(TEST_CONTAINER_NAME) make $(if $(MAKE_ARGS),$(MAKE_ARGS) $*,$*); \ - res=$$?; \ - $(DOCKERCOMPOSE_W_ENV) down; \ - exit $$res - -# Submodules tasks - -make_psql_migration: - make -C psql-migration/ - mkdir -p bin - mkdir -p migrations - cp ./psql-migration/_build/default/bin/psql_migration ./bin - -# Rebar tasks - -rebar-shell: - $(REBAR) shell - -compile: - $(REBAR) compile - -xref: - $(REBAR) xref - -lint: - $(REBAR) lint - -check-format: - $(REBAR) fmt -c - -dialyze: - $(REBAR) as test dialyzer - -release: - $(REBAR) as prod release - -eunit: - $(REBAR) eunit --cover - -common-test: - $(REBAR) ct --cover - -cover: - $(REBAR) covertool generate - -format: - $(REBAR) fmt -w - -clean: - $(REBAR) clean - -distclean: clean-build-image - rm -rf _build - -test: eunit common-test - -cover-report: - $(REBAR) cover diff --git a/benchmark/base_bench/README.md b/benchmark/base_bench/README.md deleted file mode 100644 index b5cc9b1..0000000 --- a/benchmark/base_bench/README.md +++ /dev/null @@ -1,8 +0,0 
@@ -base_bench -===== - -``` -$ rebar3 shell - -1> base_bench:start(ProcessCount, Duration). -``` diff --git a/benchmark/base_bench/config/sys.config b/benchmark/base_bench/config/sys.config deleted file mode 100644 index 74fb470..0000000 --- a/benchmark/base_bench/config/sys.config +++ /dev/null @@ -1,55 +0,0 @@ -[ - - {progressor, [ - {defaults, #{ - storage => #{ - client => prg_pg_backend, - options => #{ - pool => default_pool - } - }, - retry_policy => #{ - initial_timeout => 3, - backoff_coefficient => 1.2, - max_timeout => 180, - max_attempts => 2, - non_retryable_errors => [] - }, - task_scan_timeout => 15, %% seconds - worker_pool_size => 200, - process_step_timeout => 30 %% seconds - }}, - - {namespaces, #{ - default => #{ - processor => #{ - client => base_bench_processor, - options => #{} - } - } - }} - ]}, - - {epg_connector, [ - {databases, #{ - progressor_db => #{ - host => "postgres", - port => 5432, - database => "progressor_db", - username => "progressor", - password => "progressor" - } - }}, - {pools, #{ - default_pool => #{ - database => progressor_db, - size => 200 - } - }} - ]}, - - {prometheus, [ - {collectors, [default]} - ]} - -]. diff --git a/benchmark/base_bench/docker-compose.yml b/benchmark/base_bench/docker-compose.yml deleted file mode 100644 index 613b2e1..0000000 --- a/benchmark/base_bench/docker-compose.yml +++ /dev/null @@ -1,46 +0,0 @@ -services: - testrunner: - image: $DEV_IMAGE_TAG - environment: - WORK_DIR: $PWD - build: - dockerfile: Dockerfile.dev - context: . - args: - OTP_VERSION: $OTP_VERSION - THRIFT_VERSION: $THRIFT_VERSION - volumes: - - .:$PWD - hostname: progressor - depends_on: - postgres: - condition: service_healthy - working_dir: $PWD - - postgres: - image: postgres:15-bookworm - command: -c 'max_connections=250' - environment: - POSTGRES_DB: "progressor_db" - POSTGRES_USER: "progressor" - POSTGRES_PASSWORD: "progressor" - PGDATA: "/tmp/postgresql/data/pgdata" - volumes: - - progressor-data:/tmp/postgresql/data - ports: - - "5432:5432" - healthcheck: - test: ["CMD-SHELL", "pg_isready -U progressor -d progressor_db"] - interval: 10s - timeout: 5s - retries: 5 - start_period: 10s - restart: unless-stopped - deploy: - resources: - limits: - cpus: '4' - memory: 8G - -volumes: - progressor-data: diff --git a/benchmark/base_bench/rebar.config b/benchmark/base_bench/rebar.config deleted file mode 100644 index 8762eba..0000000 --- a/benchmark/base_bench/rebar.config +++ /dev/null @@ -1,9 +0,0 @@ -{erl_opts, [debug_info]}. -{deps, [ - {progressor, {git, "https://github.com/valitydev/progressor.git", {branch, "epic/TD-927/progressor-prototype"}}} -]}. - -{shell, [ - {config, "config/sys.config"}, - {apps, [base_bench, progressor]} -]}. diff --git a/benchmark/base_bench/src/base_bench.app.src b/benchmark/base_bench/src/base_bench.app.src deleted file mode 100644 index 520bc13..0000000 --- a/benchmark/base_bench/src/base_bench.app.src +++ /dev/null @@ -1,16 +0,0 @@ -{application, base_bench, - [{description, "An OTP application"}, - {vsn, "0.1.0"}, - {registered, []}, - {mod, {base_bench_app, []}}, - {applications, - [kernel, - stdlib, - progressor - ]}, - {env,[]}, - {modules, []}, - - {licenses, ["MIT"]}, - {links, []} - ]}. diff --git a/benchmark/base_bench/src/base_bench.erl b/benchmark/base_bench/src/base_bench.erl deleted file mode 100644 index 8289d2c..0000000 --- a/benchmark/base_bench/src/base_bench.erl +++ /dev/null @@ -1,83 +0,0 @@ --module(base_bench). - --behaviour(gen_server). - --export([start/2]). 
--export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, - code_change/3]). - --define(SERVER, ?MODULE). --define(NS, default). - --record(base_bench_state, {ids, duration}). - -%%%=================================================================== -%%% Spawning and gen_server implementation -%%%=================================================================== - -start(ProcCount, DurationSec) -> - gen_server:start_link({local, ?SERVER}, ?MODULE, [ProcCount, DurationSec], []). - -init([ProcCount, DurationSec]) -> - IDs = start_processes(ProcCount, DurationSec), - io:format(user, "Started: ~p~n", [calendar:system_time_to_rfc3339(erlang:system_time(second))]), - erlang:start_timer(DurationSec * 1000, self(), finish), - {ok, #base_bench_state{ids = IDs, duration = DurationSec}}. - -handle_call(_Request, _From, State = #base_bench_state{}) -> - {reply, ok, State}. - -handle_cast(_Request, State = #base_bench_state{}) -> - {noreply, State}. - -handle_info({timeout, _TimerRef, finish}, State = #base_bench_state{ids = IDs, duration = Duration}) -> - io:format(user, "Stopping: ~p~n", [calendar:system_time_to_rfc3339(erlang:system_time(second))]), - {EvCountsList, ErrCount} = stop_processes(IDs), - io:format(user, "Finish: ~p~n", [calendar:system_time_to_rfc3339(erlang:system_time(second))]), - Max = lists:max(EvCountsList), - Min = lists:min(EvCountsList), - Avg = lists:sum(EvCountsList) / erlang:length(EvCountsList), - Rate = lists:sum(EvCountsList) / Duration, - io:format(user, "Max: ~p~nMin: ~p~nAvg: ~p~nRate: ~p~nErrors: ~p~n", [Max, Min, Avg, Rate, ErrCount]), - {noreply, State}; -handle_info(_Info, State = #base_bench_state{}) -> - {noreply, State}. - -terminate(_Reason, _State = #base_bench_state{}) -> - ok. - -code_change(_OldVsn, State = #base_bench_state{}, _Extra) -> - {ok, State}. - -%%%=================================================================== -%%% Internal functions -%%%=================================================================== - -start_processes(N, Duration) -> - lists:foldl(fun(_N, Acc) -> [start_process(Duration) | Acc] end, [], lists:seq(1, N)). - -start_process(Duration) -> - Id = gen_id(), - _ = spawn(progressor, init, [#{ns => ?NS, id => Id, args => term_to_binary(Duration)}]), - Id. -%% - -stop_processes(IDs) -> - lists:foldl(fun(Id, Acc) -> - do_call(#{ns => ?NS, id => Id, args => <<>>}, Acc) - end, {[], 0}, IDs). -%% - -do_call(Req, {Evs, Errs}) -> - try progressor:call(Req) of - {ok, EventsCount} -> - {[EventsCount | Evs], Errs}; - {error, _} -> - {Evs, Errs + 1} - catch - _Ex:_Er -> - {Evs, Errs + 1} - end. - -gen_id() -> - base64:encode(crypto:strong_rand_bytes(8)). diff --git a/benchmark/base_bench/src/base_bench_app.erl b/benchmark/base_bench/src/base_bench_app.erl deleted file mode 100644 index fba2a12..0000000 --- a/benchmark/base_bench/src/base_bench_app.erl +++ /dev/null @@ -1,18 +0,0 @@ -%%%------------------------------------------------------------------- -%% @doc base_bench public API -%% @end -%%%------------------------------------------------------------------- - --module(base_bench_app). - --behaviour(application). - --export([start/2, stop/1]). - -start(_StartType, _StartArgs) -> - base_bench_sup:start_link(). - -stop(_State) -> - ok. 
- -%% internal functions diff --git a/benchmark/base_bench/src/base_bench_processor.erl b/benchmark/base_bench/src/base_bench_processor.erl deleted file mode 100644 index 71e3898..0000000 --- a/benchmark/base_bench/src/base_bench_processor.erl +++ /dev/null @@ -1,45 +0,0 @@ --module(base_bench_processor). - --export([process/3]). - -process({init, Args, _Process}, _Opts, _Ctx) -> - Fin = erlang:system_time(second) + binary_to_term(Args), - Result = #{ - metadata => #{finish => Fin}, - events => [event(1)], - action => #{set_timer => erlang:system_time(second)} - }, - {ok, Result}; -%% -process({timeout, _Args, #{history := History, metadata := Meta} = _Process}, _Opts, _Ctx) -> - %Random = rand:uniform(40), - %timer:sleep(60 + Random), - #{finish := FinishTime} = Meta, - Action = case FinishTime > erlang:system_time(second) of - true -> #{set_timer => erlang:system_time(second)}; - false -> unset_timer - end, - %Action = #{set_timer => erlang:system_time(second)}, - NextId = erlang:length(History) + 1, - Result = #{ - events => [event(NextId)], - action => Action - }, - {ok, Result}; -%% -process({call, _Args, #{history := History} = _Process}, _Opts, _Ctx) -> - Result = #{ - response => erlang:length(History), - events => [], - action => unset_timer - }, - {ok, Result}. -%% - -event(Id) -> - #{ - event_id => Id, - timestamp => erlang:system_time(second), - metadata => #{<<"format_version">> => 1}, - payload => erlang:term_to_binary({bin, crypto:strong_rand_bytes(64)}) - }. diff --git a/benchmark/base_bench/src/base_bench_sup.erl b/benchmark/base_bench/src/base_bench_sup.erl deleted file mode 100644 index 2477459..0000000 --- a/benchmark/base_bench/src/base_bench_sup.erl +++ /dev/null @@ -1,35 +0,0 @@ -%%%------------------------------------------------------------------- -%% @doc base_bench top level supervisor. -%% @end -%%%------------------------------------------------------------------- - --module(base_bench_sup). - --behaviour(supervisor). - --export([start_link/0]). - --export([init/1]). - --define(SERVER, ?MODULE). - -start_link() -> - supervisor:start_link({local, ?SERVER}, ?MODULE, []). - -%% sup_flags() = #{strategy => strategy(), % optional -%% intensity => non_neg_integer(), % optional -%% period => pos_integer()} % optional -%% child_spec() = #{id => child_id(), % mandatory -%% start => mfargs(), % mandatory -%% restart => restart(), % optional -%% shutdown => shutdown(), % optional -%% type => worker(), % optional -%% modules => modules()} % optional -init([]) -> - SupFlags = #{strategy => one_for_all, - intensity => 0, - period => 1}, - ChildSpecs = [], - {ok, {SupFlags, ChildSpecs}}. - -%% internal functions diff --git a/config/sys.config b/config/sys.config index bd44f70..c31fb39 100644 --- a/config/sys.config +++ b/config/sys.config @@ -47,13 +47,6 @@ client => prg_echo_processor, %% client specific options => #{} - }, - notifier => #{ - client => default_kafka_client, - options => #{ - topic => <<"default_topic">>, - lifecycle_topic => <<"default_lifecycle_topic">> - } } }, 'cached/namespace' => #{ @@ -123,15 +116,5 @@ {canal, [ {url, "http://vault"}, {engine, kvv2} - ]}, - - {brod, [ - {clients, [ - {default_kafka_client, [ - {endpoints, [{"kafka1", 9092}, {"kafka2", 9092}, {"kafka3", 9092}]}, - {auto_start_producers, true}, - {default_producer_config, []} - ]} - ]} ]} ]. 
diff --git a/docker-compose.yml b/docker-compose.yml index 2708c6c..310d94a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,12 +13,6 @@ services: - .:$PWD hostname: progressor depends_on: - kafka1: - condition: service_healthy - kafka2: - condition: service_healthy - kafka3: - condition: service_healthy postgres: condition: service_healthy working_dir: $PWD @@ -44,42 +38,3 @@ services: limits: cpus: '2' memory: 4G - - zookeeper: - image: docker.io/confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION} - healthcheck: - test: echo ruok | nc 127.0.0.1 2181 || exit -1 - interval: 5s - timeout: 240s #🍎 - retries: 50 - environment: - KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok" - ZOOKEEPER_CLIENT_PORT: 2181 - - kafka1: &kafka-broker - image: docker.io/confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION} - depends_on: - - zookeeper - healthcheck: - test: ["CMD", "kafka-topics", "--list", "--bootstrap-server", "localhost:9092"] - interval: 5s - timeout: 10s - retries: 5 - environment: - KAFKA_BROKER_ID: 1 - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092 - - kafka2: - <<: *kafka-broker - environment: - KAFKA_BROKER_ID: 2 - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092 - - kafka3: - <<: *kafka-broker - environment: - KAFKA_BROKER_ID: 3 - KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' - KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092 diff --git a/rebar.config b/rebar.config index cf6136f..9bf8096 100644 --- a/rebar.config +++ b/rebar.config @@ -1,10 +1,7 @@ {erl_opts, [debug_info]}. {deps, [ - {brod, "4.3.2"}, {prometheus, "4.11.0"}, {recon, "2.5.6"}, - {thrift, {git, "https://github.com/valitydev/thrift_erlang.git", {tag, "v1.0.0"}}}, - {mg_proto, {git, "https://github.com/valitydev/machinegun-proto.git", {branch, "master"}}}, {epg_connector, {git, "https://github.com/valitydev/epg_connector.git", {branch, "master"}}} ]}. @@ -28,7 +25,7 @@ {shell, [ {config, "config/sys.config"}, - {apps, [brod, progressor]} + {apps, [progressor]} ]}. {ct_opts, [ diff --git a/rebar.lock b/rebar.lock index 08fea5e..189dea4 100644 --- a/rebar.lock +++ b/rebar.lock @@ -1,10 +1,8 @@ {"1.2.0", -[{<<"brod">>,{pkg,<<"brod">>,<<"4.3.2">>},0}, - {<<"canal">>, +[{<<"canal">>, {git,"https://github.com/valitydev/canal", {ref,"89faedce3b054bcca7cc31ca64d2ead8a9402305"}}, 1}, - {<<"crc32cer">>,{pkg,<<"crc32cer">>,<<"0.1.11">>},2}, {<<"epg_connector">>, {git,"https://github.com/valitydev/epg_connector.git", {ref,"2e86da8083908d0d35a4eed3e2168c9ba6a8d04a"}}, @@ -15,34 +13,19 @@ 1}, {<<"jsone">>,{pkg,<<"jsone">>,<<"1.8.0">>},2}, {<<"jsx">>,{pkg,<<"jsx">>,<<"3.1.0">>},1}, - {<<"kafka_protocol">>,{pkg,<<"kafka_protocol">>,<<"4.1.10">>},1}, - {<<"mg_proto">>, - {git,"https://github.com/valitydev/machinegun-proto.git", - {ref,"3decc8f8b13c9cd1701deab47781aacddd7dbc92"}}, - 0}, {<<"prometheus">>,{pkg,<<"prometheus">>,<<"4.11.0">>},0}, {<<"quantile_estimator">>,{pkg,<<"quantile_estimator">>,<<"0.2.1">>},1}, - {<<"recon">>,{pkg,<<"recon">>,<<"2.5.6">>},0}, - {<<"thrift">>, - {git,"https://github.com/valitydev/thrift_erlang.git", - {ref,"3a60e5dc5bbd709495024f26e100b041c3547fd9"}}, - 0}]}. + {<<"recon">>,{pkg,<<"recon">>,<<"2.5.6">>},0}]}. 
[ {pkg_hash,[ - {<<"brod">>, <<"51F4DFF17ED43A806558EBD62CC88E7B35AED336D1BA1F3DE2D010F463D49736">>}, - {<<"crc32cer">>, <<"B550DA6D615FEB72A882D15D020F8F7DEE72DFB2CB1BCDF3B1EE8DC2AFD68CFC">>}, {<<"jsone">>, <<"347FF1FA700E182E1F9C5012FA6D737B12C854313B9AE6954CA75D3987D6C06D">>}, {<<"jsx">>, <<"D12516BAA0BB23A59BB35DCCAF02A1BD08243FCBB9EFE24F2D9D056CCFF71268">>}, - {<<"kafka_protocol">>, <<"F917B6C90C8DF0DE2B40A87D6B9AE1CFCE7788E91A65818E90E40CF76111097A">>}, {<<"prometheus">>, <<"B95F8DE8530F541BD95951E18E355A840003672E5EDA4788C5FA6183406BA29A">>}, {<<"quantile_estimator">>, <<"EF50A361F11B5F26B5F16D0696E46A9E4661756492C981F7B2229EF42FF1CD15">>}, {<<"recon">>, <<"9052588E83BFEDFD9B72E1034532AEE2A5369D9D9343B61AEB7FBCE761010741">>}]}, {pkg_hash_ext,[ - {<<"brod">>, <<"88584FDEBA746AA6729E2A1826416C10899954F68AF93659B3C2F38A2DCAA27C">>}, - {<<"crc32cer">>, <<"A39B8F0B1990AC1BF06C3A247FC6A178B740CDFC33C3B53688DC7DD6B1855942">>}, {<<"jsone">>, <<"08560B78624A12E0B5E7EC0271EC8CA38EF51F63D84D84843473E14D9B12618C">>}, {<<"jsx">>, <<"0C5CC8FDC11B53CC25CF65AC6705AD39E54ECC56D1C22E4ADB8F5A53FB9427F3">>}, - {<<"kafka_protocol">>, <<"DF680A3706EAD8695F8B306897C0A33E8063C690DA9308DB87B462CFD7029D04">>}, {<<"prometheus">>, <<"719862351AABF4DF7079B05DC085D2BBCBE3AC0AC3009E956671B1D5AB88247D">>}, {<<"quantile_estimator">>, <<"282A8A323CA2A845C9E6F787D166348F776C1D4A41EDE63046D72D422E3DA946">>}, {<<"recon">>, <<"96C6799792D735CC0F0FD0F86267E9D351E63339CBE03DF9D162010CEFC26BB0">>}]} diff --git a/src/prg_notifier.erl b/src/prg_notifier.erl deleted file mode 100644 index 864eb87..0000000 --- a/src/prg_notifier.erl +++ /dev/null @@ -1,184 +0,0 @@ --module(prg_notifier). - --include("progressor.hrl"). --include_lib("mg_proto/include/mg_proto_lifecycle_sink_thrift.hrl"). --include_lib("mg_proto/include/mg_proto_event_sink_thrift.hrl"). - --export([event_sink/3]). --export([lifecycle_sink/3]). - -%% internals --export([serialize_content/1]). - --spec event_sink(namespace_opts(), id(), [event()]) -> ok | {error, _Reason} | no_return(). -event_sink(_NsOpts, _ID, []) -> - ok; -event_sink( - #{ - namespace := NS, - notifier := #{client := Client, options := #{topic := Topic}} - }, - ID, - Events -) -> - Batch = encode(fun serialize_eventsink/3, NS, ID, Events), - ok = produce(Client, Topic, event_key(NS, ID), Batch), - ok; -event_sink(_NsOpts, _ID, _Events) -> - ok. - --spec lifecycle_sink(namespace_opts(), task_t() | {error, _Reason}, id()) -> - ok | {error, _Reason} | no_return(). -lifecycle_sink( - #{ - namespace := NS, - notifier := #{client := Client, options := #{lifecycle_topic := Topic}} - }, - TaskType, - ID -) when - TaskType =:= init; - TaskType =:= repair; - TaskType =:= remove; - erlang:is_tuple(TaskType) --> - Batch = encode(fun serialize_lifecycle/3, NS, ID, [lifecycle_event(TaskType)]), - ok = produce(Client, Topic, event_key(NS, ID), Batch), - ok; -lifecycle_sink(_NsOpts, _TaskType, _ID) -> - ok. - -%% Internal functions - -encode(Encoder, NS, ID, Events) -> - [ - #{ - key => event_key(NS, ID), - value => Encoder(NS, ID, Event) - } - || Event <- Events - ]. - -produce(Client, Topic, PartitionKey, Batch) -> - case brod:get_partitions_count(Client, Topic) of - {ok, PartitionsCount} -> - Partition = partition(PartitionsCount, PartitionKey), - case brod:produce_sync_offset(Client, Topic, Partition, PartitionKey, Batch) of - {ok, _Offset} -> - ok; - {error, _Reason} = Error -> - Error - end; - {error, _Reason} = Error -> - Error - end. 
- -partition(PartitionsCount, Key) -> - erlang:phash2(Key) rem PartitionsCount. - -event_key(NS, ID) -> - <<NS/binary, ID/binary>>. - -%% eventsink serialization - -serialize_eventsink(SourceNS, SourceID, Event) -> - Codec = thrift_strict_binary_codec:new(), - #{ - event_id := EventID, - timestamp := Timestamp, - payload := Payload - } = Event, - Content = erlang:binary_to_term(Payload), - Metadata = maps:get(metadata, Event, #{}), - Data = - {event, #mg_evsink_MachineEvent{ - source_ns = SourceNS, - source_id = SourceID, - event_id = EventID, - created_at = serialize_timestamp(Timestamp), - format_version = maps:get(format_version, Metadata, undefined), - data = Content - }}, - Type = {struct, union, {mg_proto_event_sink_thrift, 'SinkEvent'}}, - case thrift_strict_binary_codec:write(Codec, Type, Data) of - {ok, NewCodec} -> - thrift_strict_binary_codec:close(NewCodec); - {error, Reason} -> - erlang:error({?MODULE, Reason}) - end. - -serialize_content(null) -> - {nl, #mg_msgpack_Nil{}}; -serialize_content(Boolean) when is_boolean(Boolean) -> - {b, Boolean}; -serialize_content(Integer) when is_integer(Integer) -> - {i, Integer}; -serialize_content(Float) when is_float(Float) -> - {flt, Float}; -serialize_content({string, String}) -> - {str, unicode:characters_to_binary(String, unicode)}; -serialize_content(Binary) when is_binary(Binary) -> - {bin, Binary}; -serialize_content(Object) when is_map(Object) -> - {obj, - maps:fold( - fun(K, V, Acc) -> maps:put(serialize_content(K), serialize_content(V), Acc) end, - #{}, - Object - )}; -serialize_content(Array) when is_list(Array) -> - {arr, lists:map(fun serialize_content/1, Array)}; -serialize_content(Arg) -> - erlang:error(badarg, [Arg]). - -serialize_timestamp(TimestampSec) -> - Str = calendar:system_time_to_rfc3339(TimestampSec, [{unit, second}, {offset, "Z"}]), - erlang:list_to_binary(Str). - -%% lifecycle serialization - -lifecycle_event(init) -> - {machine_lifecycle_created, #{occurred_at => erlang:system_time(second)}}; -lifecycle_event(repair) -> - {machine_lifecycle_repaired, #{occurred_at => erlang:system_time(second)}}; -lifecycle_event(remove) -> - {machine_lifecycle_removed, #{occurred_at => erlang:system_time(second)}}; -lifecycle_event({error, Reason}) -> - {machine_lifecycle_failed, #{occurred_at => erlang:system_time(second), reason => Reason}}. - -serialize_lifecycle(SourceNS, SourceID, Event) -> - Codec = thrift_strict_binary_codec:new(), - Data = serialize_lifecycle_event(SourceNS, SourceID, Event), - Type = {struct, struct, {mg_proto_lifecycle_sink_thrift, 'LifecycleEvent'}}, - case thrift_strict_binary_codec:write(Codec, Type, Data) of - {ok, NewCodec} -> - thrift_strict_binary_codec:close(NewCodec); - {error, Reason} -> - erlang:error({?MODULE, Reason}) - end. - -serialize_lifecycle_event(SourceNS, SourceID, {_, #{occurred_at := Timestamp}} = Event) -> - #mg_lifesink_LifecycleEvent{ - machine_ns = SourceNS, - machine_id = SourceID, - created_at = serialize_timestamp(Timestamp), - data = serialize_lifecycle_data(Event) - }. 
- -serialize_lifecycle_data({machine_lifecycle_created, _}) -> - {machine, {created, #mg_lifesink_MachineLifecycleCreatedEvent{}}}; -serialize_lifecycle_data({machine_lifecycle_failed, #{reason := Reason}}) -> - {machine, - {status_changed, #mg_lifesink_MachineLifecycleStatusChangedEvent{ - new_status = - {failed, #mg_stateproc_MachineStatusFailed{ - reason = Reason - }} - }}}; -serialize_lifecycle_data({machine_lifecycle_repaired, _}) -> - {machine, - {status_changed, #mg_lifesink_MachineLifecycleStatusChangedEvent{ - new_status = {working, #mg_stateproc_MachineStatusWorking{}} - }}}; -serialize_lifecycle_data({machine_lifecycle_removed, _}) -> - {machine, {removed, #mg_lifesink_MachineLifecycleRemovedEvent{}}}. diff --git a/src/prg_worker.erl b/src/prg_worker.erl index bd12b35..d9dd398 100644 --- a/src/prg_worker.erl +++ b/src/prg_worker.erl @@ -116,12 +116,12 @@ do_process_task( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, + ns_opts = #{storage := StorageOpts}, process = #{process_id := ProcessId} = _Process, sidecar_pid = Pid } = State ) -> - ok = prg_worker_sidecar:lifecycle_sink(Pid, Deadline, NsOpts, remove, ProcessId), + %% step hook ok = prg_worker_sidecar:remove_process(Pid, Deadline, StorageOpts, NsId, ProcessId), ok = next_task(self()), State#prg_worker_state{process = undefined}; @@ -170,7 +170,7 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, + ns_opts = #{storage := StorageOpts}, process = #{process_id := ProcessId} = Process, sidecar_pid = Pid } = State @@ -195,13 +195,7 @@ handle_result( last_retry_interval => 0, attempts_count => 0 }, - ok = prg_worker_sidecar:lifecycle_sink( - Pid, Deadline, NsOpts, extract_task_type(TaskHeader), ProcessId - ), - ok = prg_worker_sidecar:event_sink(Pid, Deadline, NsOpts, ProcessId, Events), - %% just for tests - ok = maybe_wait_call(application:get_env(progressor, call_wait_timeout, undefined)), - %% + %% step hook SaveResult = prg_worker_sidecar:complete_and_continue( Pid, Deadline, @@ -232,13 +226,13 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, + ns_opts = #{storage := StorageOpts}, process = #{process_id := ProcessId} = _Process, sidecar_pid = Pid } = State ) -> Response = response(maps:get(response, Result, undefined)), - ok = prg_worker_sidecar:lifecycle_sink(Pid, Deadline, NsOpts, remove, ProcessId), + %% step hook ok = prg_worker_sidecar:remove_process(Pid, Deadline, StorageOpts, NsId, ProcessId), _ = maybe_reply(TaskHeader, Response), ok = next_task(self()), @@ -251,15 +245,12 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, - process = #{process_id := ProcessId} = Process, + ns_opts = #{storage := StorageOpts}, + process = Process, sidecar_pid = Pid } = State ) -> - ok = prg_worker_sidecar:lifecycle_sink( - Pid, Deadline, NsOpts, extract_task_type(TaskHeader), ProcessId - ), - ok = prg_worker_sidecar:event_sink(Pid, Deadline, NsOpts, ProcessId, Events), + %% step hook ProcessUpdated = update_process( maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result ), @@ -299,16 +290,13 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, - process = #{process_id := ProcessId, corrupted_by := ErrorTaskId} = Process, + ns_opts = #{storage := StorageOpts}, + process = #{corrupted_by := ErrorTaskId} = Process, sidecar_pid = Pid } = State 
) -> Now = erlang:system_time(second), - ok = prg_worker_sidecar:lifecycle_sink( - Pid, Deadline, NsOpts, extract_task_type(TaskHeader), ProcessId - ), - ok = prg_worker_sidecar:event_sink(Pid, Deadline, NsOpts, ProcessId, Events), + %% step hook ProcessUpdated = update_process( maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result ), @@ -370,15 +358,12 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, - process = #{process_id := ProcessId} = Process, + ns_opts = #{storage := StorageOpts}, + process = Process, sidecar_pid = Pid } = State ) -> - ok = prg_worker_sidecar:lifecycle_sink( - Pid, Deadline, NsOpts, extract_task_type(TaskHeader), ProcessId - ), - ok = prg_worker_sidecar:event_sink(Pid, Deadline, NsOpts, ProcessId, Events), + %% step hook ProcessUpdated = update_process( maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result ), @@ -418,8 +403,8 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts} = NsOpts, - process = #{process_id := ProcessId} = Process, + ns_opts = #{storage := StorageOpts}, + process = Process, sidecar_pid = Pid } = State ) when @@ -434,9 +419,7 @@ handle_result( Process; _ -> Detail = prg_utils:format(Reason), - ok = prg_worker_sidecar:lifecycle_sink( - Pid, Deadline, NsOpts, {error, Detail}, ProcessId - ), + %% step hook Process#{status => <<"error">>, detail => Detail} end, TaskResult = #{ @@ -459,8 +442,8 @@ handle_result( Deadline, #prg_worker_state{ ns_id = NsId, - ns_opts = #{storage := StorageOpts, retry_policy := RetryPolicy} = NsOpts, - process = #{process_id := ProcessId} = Process, + ns_opts = #{storage := StorageOpts, retry_policy := RetryPolicy}, + process = Process, sidecar_pid = Pid } = State ) when TaskType =:= timeout; TaskType =:= remove -> @@ -477,7 +460,7 @@ handle_result( ProcessUpdated = Process#{ status => <<"error">>, detail => Detail, corrupted_by => TaskId }, - ok = prg_worker_sidecar:lifecycle_sink(Pid, Deadline, NsOpts, {error, Detail}, ProcessId), + %% step hook ok = prg_worker_sidecar:complete_and_error( Pid, Deadline, StorageOpts, NsId, TaskResult, ProcessUpdated ); @@ -555,20 +538,12 @@ check_retryable(TaskHeader, #{last_retry_interval := LastInterval} = Task, Retry not_retryable end. -%% machinegun legacy --define(WOODY_ERROR(Class), {exception, _, {woody_error, Class, _}}). -define(TEST_POLICY(Error, RetryPolicy, Timeout, Attempts), (Timeout < maps:get(max_timeout, RetryPolicy, infinity) andalso Attempts < maps:get(max_attempts, RetryPolicy, infinity) andalso not lists:any(fun(E) -> Error =:= E end, maps:get(non_retryable_errors, RetryPolicy, []))) ). -is_retryable(?WOODY_ERROR(result_unexpected), _TaskHeader, _RetryPolicy, _Timeout, _Attempts) -> - false; -is_retryable(?WOODY_ERROR(resource_unavailable) = Error, {timeout, undefined}, RetryPolicy, Timeout, Attempts) -> - ?TEST_POLICY(Error, RetryPolicy, Timeout, Attempts); -is_retryable(?WOODY_ERROR(result_unknown) = Error, {timeout, undefined}, RetryPolicy, Timeout, Attempts) -> - ?TEST_POLICY(Error, RetryPolicy, Timeout, Attempts); is_retryable({exception, _, _}, _TaskHeader, _RetryPolicy, _Timeout, _Attempts) -> false; is_retryable(Error, {timeout, undefined}, RetryPolicy, Timeout, Attempts) -> @@ -599,11 +574,6 @@ action_to_task_type(#{remove := true}) -> action_to_task_type(#{set_timer := _}) -> <<"timeout">>. -maybe_wait_call(undefined) -> - ok; -maybe_wait_call(Timeout) -> - timer:sleep(Timeout). 
- last_event_id([]) -> 0; last_event_id(History) -> diff --git a/src/prg_worker_sidecar.erl b/src/prg_worker_sidecar.erl index f74f71d..50f8e11 100644 --- a/src/prg_worker_sidecar.erl +++ b/src/prg_worker_sidecar.erl @@ -22,9 +22,6 @@ -export([complete_and_unlock/7]). -export([complete_and_error/6]). -export([remove_process/5]). -%% Notifier functions wrapper --export([event_sink/5]). --export([lifecycle_sink/5]). %% -export([get_process/5]). -export([get_process/6]). @@ -40,7 +37,6 @@ -define(PROCESSING_KEY, progressor_task_processing_duration_ms). -define(COMPLETION_KEY, progressor_task_completion_duration_ms). -define(REMOVING_KEY, progressor_process_removing_duration_ms). --define(NOTIFICATION_KEY, progressor_notification_duration_ms). -dialyzer({nowarn_function, do_with_retry/2}). %% API @@ -142,25 +138,6 @@ remove_process(Pid, _Deadline, StorageOpts, NsId, ProcessId) -> gen_server:call(Pid, {remove_process, StorageOpts, NsId, ProcessId}, infinity) end, prg_utils:with_observe(Fun, ?REMOVING_KEY, [erlang:atom_to_list(NsId)]). - -%% notifier wrappers - --spec event_sink(pid(), timestamp_ms(), namespace_opts(), id(), [event()]) -> ok | no_return(). -event_sink(Pid, Deadline, #{namespace := NS} = NsOpts, ProcessId, Events) -> - Timeout = Deadline - erlang:system_time(millisecond), - Fun = fun() -> - gen_server:call(Pid, {event_sink, NsOpts, ProcessId, Events}, Timeout) - end, - prg_utils:with_observe(Fun, ?NOTIFICATION_KEY, [NS, "event_sink"]). - --spec lifecycle_sink(pid(), timestamp_ms(), namespace_opts(), task_t() | {error, _Reason}, id()) -> - ok | no_return(). -lifecycle_sink(Pid, Deadline, #{namespace := NS} = NsOpts, TaskType, ProcessId) -> - Timeout = Deadline - erlang:system_time(millisecond), - Fun = fun() -> - gen_server:call(Pid, {lifecycle_sink, NsOpts, TaskType, ProcessId}, Timeout) - end, - prg_utils:with_observe(Fun, ?NOTIFICATION_KEY, [NS, "lifecycle_sink"]). %% -spec get_process(pid(), timestamp_ms(), storage_opts(), namespace_id(), id()) -> @@ -286,14 +263,6 @@ handle_call( prg_storage:complete_and_error(StorageOpts, NsId, TaskResult, Process) end, Response = do_with_retry(Fun, ?DEFAULT_DELAY), - {reply, Response, State}; -handle_call({event_sink, NsOpts, ProcessId, Events}, _From, State) -> - Fun = fun() -> prg_notifier:event_sink(NsOpts, ProcessId, Events) end, - Response = do_with_retry(Fun, ?DEFAULT_DELAY), - {reply, Response, State}; -handle_call({lifecycle_sink, NsOpts, TaskType, ProcessId}, _From, State) -> - Fun = fun() -> prg_notifier:lifecycle_sink(NsOpts, TaskType, ProcessId) end, - Response = do_with_retry(Fun, ?DEFAULT_DELAY), {reply, Response, State}. handle_cast(_Request, #prg_sidecar_state{} = State) -> diff --git a/src/progressor.app.src b/src/progressor.app.src index d713868..7fee20d 100644 --- a/src/progressor.app.src +++ b/src/progressor.app.src @@ -8,10 +8,7 @@ stdlib, jsx, prometheus, - epg_connector, - thrift, - mg_proto, - brod + epg_connector ]}, {env, []}, {modules, []}, diff --git a/src/progressor_app.erl b/src/progressor_app.erl index 676dd01..e796541 100644 --- a/src/progressor_app.erl +++ b/src/progressor_app.erl @@ -98,11 +98,4 @@ create_metrics() -> {help, "Task completion durations in millisecond"}, {buckets, [50, 150, 300, 500, 750, 1000]}, {labels, [prg_namespace]} - ]), - - _ = prometheus_histogram:new([ - {name, progressor_notification_duration_ms}, - {help, "Notification durations in millisecond"}, - {buckets, [10, 50, 150, 300, 500, 1000]}, - {labels, [prg_namespace, notification_type]} ]). 
diff --git a/test/prg_ct_hook.erl b/test/prg_ct_hook.erl index b21eeba..dc02e02 100644 --- a/test/prg_ct_hook.erl +++ b/test/prg_ct_hook.erl @@ -3,13 +3,8 @@ %% API -export([init/2, terminate/1, pre_init_per_suite/3]). --define(LIFECYCLE_TOPIC, <<"default_lifecycle_topic">>). --define(EVENTSINK_TOPIC, <<"default_topic">>). --define(BROKERS, [{"kafka1", 9092}, {"kafka2", 9092}, {"kafka3", 9092}]). - init(_Id, State) -> _ = start_applications(), - _ = create_kafka_topics(), State. pre_init_per_suite(_SuiteName, Config, State) -> @@ -33,7 +28,6 @@ app_list() -> %% in order of launch [ epg_connector, - brod, progressor ]. @@ -73,13 +67,6 @@ app_env(progressor) -> processor => #{ client => prg_ct_processor, options => #{} - }, - notifier => #{ - client => default_kafka_client, - options => #{ - topic => ?EVENTSINK_TOPIC, - lifecycle_topic => ?LIFECYCLE_TOPIC - } } }, 'cached/namespace' => #{ @@ -119,33 +106,4 @@ app_env(epg_connector) -> } }}, {force_garbage_collect, true} - ]; -app_env(brod) -> - [ - {clients, [ - {default_kafka_client, [ - {endpoints, ?BROKERS}, - {auto_start_producers, true}, - {default_producer_config, []} - ]} - ]} ]. - -create_kafka_topics() -> - TopicConfig = [ - #{ - configs => [], - num_partitions => 1, - assignments => [], - replication_factor => 1, - name => ?EVENTSINK_TOPIC - }, - #{ - configs => [], - num_partitions => 1, - assignments => [], - replication_factor => 1, - name => ?LIFECYCLE_TOPIC - } - ], - _ = brod:create_topics(?BROKERS, TopicConfig, #{timeout => 5000}). From 9dc51f778b1b8828de0bd22a08df77026d5704eb Mon Sep 17 00:00:00 2001 From: ttt161 Date: Thu, 11 Sep 2025 10:18:56 +0300 Subject: [PATCH 2/5] state generational progressor --- docker-compose.yml | 1 - include/progressor.hrl | 43 +- src/prg_storage.erl | 45 +- src/prg_utils.erl | 7 + src/prg_worker.erl | 264 +++---- src/prg_worker_sidecar.erl | 38 +- src/progressor.erl | 113 +-- src/storage/postgres/prg_pg_backend.erl | 344 +++------ src/storage/postgres/prg_pg_cache.erl | 156 ++-- src/storage/postgres/prg_pg_migration.erl | 86 ++- src/storage/postgres/prg_pg_utils.erl | 42 +- test/prg_base_SUITE.erl | 824 +++++++--------------- 12 files changed, 683 insertions(+), 1280 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 310d94a..5b3c8e4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,7 +8,6 @@ services: context: . args: OTP_VERSION: $OTP_VERSION - THRIFT_VERSION: $THRIFT_VERSION volumes: - .:$PWD hostname: progressor diff --git a/include/progressor.hrl b/include/progressor.hrl index a1887b2..5127fdf 100644 --- a/include/progressor.hrl +++ b/include/progressor.hrl @@ -7,10 +7,9 @@ detail => binary(), aux_state => binary(), metadata => map(), - history => [event()], + state => process_state(), corrupted_by => task_id(), - range => history_range(), - last_event_id => event_id() + current_generation => generation() }. -type task() :: #{ @@ -22,6 +21,7 @@ running_time => timestamp_sec(), finished_time => timestamp_sec(), args => binary(), + generation => generation(), metadata => map(), idempotency_key => binary(), response => binary(), @@ -31,14 +31,16 @@ context => binary() }. --type event() :: #{ - process_id := id(), - task_id := task_id(), - event_id := event_id(), - timestamp := timestamp_sec(), - metadata => #{format => pos_integer()}, - payload := binary() -}. 
+-type process_state() :: + #{ + process_id := id(), + task_id := task_id(), + generation := generation(), + timestamp := timestamp_ms(), + metadata => #{format => pos_integer()}, + payload := binary() + } + | undefined. %%% %%% Config options @@ -86,7 +88,8 @@ %%% Other types %%% -type id() :: binary(). --type event_id() :: pos_integer(). +-type explicit_generation() :: non_neg_integer(). +-type generation() :: explicit_generation() | latest. -type task_id() :: pos_integer(). -type process_status() :: binary(). @@ -106,26 +109,16 @@ -type recipient() :: internal | external. --type history_range() :: #{ - offset => non_neg_integer(), - limit => pos_integer(), - direction => forward | backward -}. - -type processor_intent() :: #{ - events := [event()], + state := process_state(), action => action(), response => term(), aux_state => binary(), metadata => map() }. -type processor_exception(Reason) :: {exception, _Class, Reason}. --type maybe_transient_error_reason() :: - processor_exception({woody_error, {_, atom(), _}}) - | any(). --type non_transient_error_reason() :: - processor_exception({woody_error, {_, result_unexpected, _}}) - | processor_exception(any()). +-type maybe_transient_error_reason() :: any(). +-type non_transient_error_reason() :: processor_exception(any()). -type process_result() :: {ok, processor_intent()} | {error, non_transient_error_reason() | maybe_transient_error_reason()}. diff --git a/src/prg_storage.erl b/src/prg_storage.erl index de26b9f..b4ddf95 100644 --- a/src/prg_storage.erl +++ b/src/prg_storage.erl @@ -9,7 +9,6 @@ -export([prepare_init/4]). -export([prepare_call/4]). -export([prepare_repair/4]). --export([put_process_data/4]). %% scan functions -export([search_timers/4]). @@ -54,16 +53,6 @@ get_task_result(#{client := Handler, options := HandlerOpts}, NsId, KeyOrId) -> get_process_status(#{client := Handler, options := HandlerOpts}, NsId, Id) -> Handler:get_process_status(HandlerOpts, NsId, Id). --spec put_process_data( - storage_opts(), - namespace_id(), - id(), - #{process := process(), init_task => task(), active_task => task()} -) -> - {ok, _Result} | {error, _Reason}. -put_process_data(#{client := Handler, options := HandlerOpts}, NsId, Id, ProcessData) -> - Handler:put_process_data(HandlerOpts, NsId, Id, ProcessData). - -spec prepare_init(storage_opts(), namespace_id(), id(), task()) -> {ok, {postpone, task_id()} | {continue, task_id()}} | {error, _Reason}. prepare_init(#{client := Handler, options := HandlerOpts}, NsId, ProcessId, InitTask) -> @@ -101,31 +90,31 @@ collect_zombies(#{client := Handler, options := HandlerOpts}, NsId, Timeout) -> %%%%%%%%%%%%%%%%%%% -spec complete_and_continue( - storage_opts(), namespace_id(), task_result(), process(), [event()], task() + storage_opts(), namespace_id(), task_result(), process(), process_state(), task() ) -> {ok, [task()]}. complete_and_continue( - #{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process, Events, NextTask + #{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process, ProcessState, NextTask ) -> - Handler:complete_and_continue(HandlerOpts, NsId, TaskResult, Process, Events, NextTask). + Handler:complete_and_continue(HandlerOpts, NsId, TaskResult, Process, ProcessState, NextTask). --spec complete_and_suspend(storage_opts(), namespace_id(), task_result(), process(), [event()]) -> +-spec complete_and_suspend(storage_opts(), namespace_id(), task_result(), process(), process_state()) -> {ok, [task()]}. 
complete_and_suspend( - #{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process, Events + #{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process, ProcessState ) -> - Handler:complete_and_suspend(HandlerOpts, NsId, TaskResult, Process, Events). + Handler:complete_and_suspend(HandlerOpts, NsId, TaskResult, Process, ProcessState). -spec complete_and_error(storage_opts(), namespace_id(), task_result(), process()) -> ok. complete_and_error(#{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process) -> Handler:complete_and_error(HandlerOpts, NsId, TaskResult, Process). --spec complete_and_unlock(storage_opts(), namespace_id(), task_result(), process(), [event()]) -> +-spec complete_and_unlock(storage_opts(), namespace_id(), task_result(), process(), process_state()) -> {ok, [task()]}. complete_and_unlock( - #{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process, Events + #{client := Handler, options := HandlerOpts}, NsId, TaskResult, Process, ProcessState ) -> - Handler:complete_and_unlock(HandlerOpts, NsId, TaskResult, Process, Events). + Handler:complete_and_unlock(HandlerOpts, NsId, TaskResult, Process, ProcessState). -spec remove_process(storage_opts(), namespace_id(), id()) -> ok | no_return(). remove_process(#{client := Handler, options := HandlerOpts}, NsId, ProcessId) -> @@ -146,23 +135,23 @@ get_task(Recipient, #{client := Handler, options := HandlerOpts}, NsId, TaskId) -spec get_process(storage_opts(), namespace_id(), id()) -> {ok, process()} | {error, _Reason}. get_process(StorageOpts, NsId, ProcessId) -> - get_process(internal, StorageOpts, NsId, ProcessId, #{}). + get_process(internal, StorageOpts, NsId, ProcessId, latest). -spec get_process( storage_opts() | recipient(), namespace_id() | storage_opts(), id() | namespace_id(), - history_range() | id() + generation() | id() ) -> {ok, process()} | {error, _Reason}. -get_process(StorageOpts, NsId, ProcessId, HistoryRange) when is_map(StorageOpts) -> - get_process(internal, StorageOpts, NsId, ProcessId, HistoryRange); +get_process(StorageOpts, NsId, ProcessId, Generation) when is_map(StorageOpts) -> + get_process(internal, StorageOpts, NsId, ProcessId, Generation); get_process(Recipient, StorageOpts, NsId, ProcessId) when is_atom(Recipient) -> - get_process(Recipient, StorageOpts, NsId, ProcessId, #{}). + get_process(Recipient, StorageOpts, NsId, ProcessId, latest). --spec get_process(recipient(), storage_opts(), namespace_id(), id(), history_range()) -> +-spec get_process(recipient(), storage_opts(), namespace_id(), id(), generation()) -> {ok, process()} | {error, _Reason}. -get_process(Recipient, #{client := Handler, options := HandlerOpts}, NsId, ProcessId, HistoryRange) -> - Handler:get_process(Recipient, HandlerOpts, NsId, ProcessId, HistoryRange). +get_process(Recipient, #{client := Handler, options := HandlerOpts}, NsId, ProcessId, Generation) -> + Handler:get_process(Recipient, HandlerOpts, NsId, ProcessId, Generation). %%% diff --git a/src/prg_utils.erl b/src/prg_utils.erl index f42489e..d41d681 100644 --- a/src/prg_utils.erl +++ b/src/prg_utils.erl @@ -10,6 +10,7 @@ -export([unixtime_to_datetime/1]). -export([with_observe/3]). -export([with_observe/4]). +-export([define/2]). -spec registered_name(atom(), string()) -> atom(). registered_name(BaseAtom, PostfixStr) -> @@ -27,6 +28,12 @@ format(Term) when is_binary(Term) -> format(Term) -> unicode:characters_to_binary(io_lib:format("~64000p", [Term])). +-spec define(term(), term()) -> term(). 
+define(undefined, Default) -> + Default; +define(Value, _Default) -> + Value. + -spec make_ns_opts(atom(), namespace_opts()) -> namespace_opts(). make_ns_opts(NsId, NsOpts) -> PresetDefaults = #{ diff --git a/src/prg_worker.erl b/src/prg_worker.erl index d9dd398..7ffb17d 100644 --- a/src/prg_worker.erl +++ b/src/prg_worker.erl @@ -21,8 +21,6 @@ -record(prg_worker_state, {ns_id, ns_opts, process, sidecar_pid}). --define(DEFAULT_RANGE, #{direction => forward}). - %%% %%% API %%% @@ -77,18 +75,32 @@ handle_cast( ) -> Deadline = erlang:system_time(millisecond) + TimeoutSec * 1000, ProcessId = maps:get(process_id, Task), - HistoryRange = maps:get(range, maps:get(metadata, Task, #{}), #{}), - {ok, Process} = prg_worker_sidecar:get_process(Pid, Deadline, StorageOpts, NsId, ProcessId, HistoryRange), + Generation = maps:get(generation, Task, latest), + {ok, Process} = prg_worker_sidecar:get_process(Pid, Deadline, StorageOpts, NsId, ProcessId, Generation), NewState = do_process_task(TaskHeader, Task, Deadline, State#prg_worker_state{process = Process}), {noreply, NewState}; handle_cast( {continuation_task, TaskHeader, Task}, #prg_worker_state{ - ns_opts = #{process_step_timeout := TimeoutSec} + ns_id = NsId, + ns_opts = #{storage := StorageOpts, process_step_timeout := TimeoutSec}, + process = #{current_generation := CurrentGeneration} = CurrentProcess, + sidecar_pid = Pid } = State ) -> Deadline = erlang:system_time(millisecond) + TimeoutSec * 1000, - NewState = do_process_task(TaskHeader, Task, Deadline, State), + ProcessId = maps:get(process_id, Task), + GenerationFromTask = maps:get(generation, Task, CurrentGeneration), + Process = + case GenerationFromTask =:= CurrentGeneration of + true -> + CurrentProcess; + false -> + {ok, ProcessWithTargetState} = + prg_worker_sidecar:get_process(Pid, Deadline, StorageOpts, NsId, ProcessId, GenerationFromTask), + ProcessWithTargetState + end, + NewState = do_process_task(TaskHeader, Task, Deadline, State#prg_worker_state{process = Process}), {noreply, NewState}; handle_cast(next_task, #prg_worker_state{sidecar_pid = CurrentPid}) -> %% kill sidecar and restart to clear memory @@ -140,31 +152,11 @@ do_process_task( Ctx = maps:get(context, Task, <<>>), Request = {extract_task_type(TaskHeader), Args, Process}, Result = prg_worker_sidecar:process(Pid, Deadline, NsOpts, Request, Ctx), - State1 = maybe_restore_history(Task, State), - handle_result(Result, TaskHeader, Task, Deadline, State1). - -maybe_restore_history(#{metadata := #{range := Range}}, State) when Range =:= ?DEFAULT_RANGE -> - State; -%% if task range is defined then need restore full history for continuation -maybe_restore_history( - #{metadata := #{range := Range}}, - #prg_worker_state{ - ns_id = NsId, - ns_opts = #{storage := StorageOpts, process_step_timeout := TimeoutSec} = _NsOpts, - sidecar_pid = Pid, - process = #{process_id := ProcessId} - } = State -) when map_size(Range) > 0 -> - Deadline = erlang:system_time(millisecond) + TimeoutSec * 1000, - {ok, ProcessUpd} = prg_worker_sidecar:get_process(Pid, Deadline, StorageOpts, NsId, ProcessId, #{}), - State#prg_worker_state{process = ProcessUpd}; -%% if task range undefined then history is full -maybe_restore_history(_, State) -> - State. + handle_result(Result, TaskHeader, Task, Deadline, State). 
%% success result with timer
 handle_result(
-    {ok, #{action := #{set_timer := Timestamp} = Action, events := Events} = Result},
+    {ok, #{action := #{set_timer := Timestamp} = Action, state := ProcessState0} = Result},
     TaskHeader,
     #{task_id := TaskId, context := Context},
     Deadline,
@@ -175,17 +167,31 @@ handle_result(
         sidecar_pid = Pid
     } = State
 ) ->
+    %% the generation of the state this task was executed against;
+    %% it may be older than the current (latest) generation
+    GenerationFromTask = state_generation(Process),
+    CurrentGeneration = prg_utils:define(current_generation(Process), 0),
+    NextGeneration = CurrentGeneration + 1,
+    ProcessState = ProcessState0#{generation => NextGeneration},
     Now = erlang:system_time(second),
     ProcessUpdated = update_process(
-        maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result
+        maps:without(
+            [detail, corrupted_by],
+            Process#{status => <<"running">>, current_generation => NextGeneration}
+        ),
+        Result
     ),
     Response = response(maps:get(response, Result, undefined)),
-    TaskResult = #{
-        task_id => TaskId,
-        response => term_to_binary(Response),
-        finished_time => Now,
-        status => <<"finished">>
-    },
+    %% record the generation the task was executed against
+    TaskResult = maybe_add_generation(
+        GenerationFromTask,
+        #{
+            task_id => TaskId,
+            response => term_to_binary(Response),
+            finished_time => Now,
+            status => <<"finished">>
+        }
+    ),
     NewTask = #{
         process_id => ProcessId,
         task_type => action_to_task_type(Action),
@@ -203,7 +209,7 @@ handle_result(
             NsId,
             TaskResult,
             ProcessUpdated,
-            Events,
+            ProcessState,
             NewTask
         ),
     _ = maybe_reply(TaskHeader, Response),
@@ -212,10 +218,9 @@
             ok = next_task(self()),
             State#prg_worker_state{process = undefined};
         {ok, [ContinuationTask | _]} ->
-            NewHistory = maps:get(history, Process) ++ Events,
             ok = continuation_task(self(), create_header(ContinuationTask), ContinuationTask),
             State#prg_worker_state{
-                process = ProcessUpdated#{history => NewHistory, last_event_id => last_event_id(NewHistory)}
+                process = ProcessUpdated#{state => ProcessState}
             }
     end;
 %% success result with undefined timer and remove action
 handle_result(
@@ -239,7 +244,7 @@
     State#prg_worker_state{process = undefined};
 %% success result with unset_timer action
 handle_result(
-    {ok, #{events := Events, action := unset_timer} = Result},
+    {ok, #{state := ProcessState0, action := unset_timer} = Result},
     TaskHeader,
     #{task_id := TaskId} = _Task,
     Deadline,
@@ -250,17 +255,25 @@
         sidecar_pid = Pid
     } = State
 ) ->
+    GenerationFromTask = state_generation(Process),
+    CurrentGeneration = prg_utils:define(current_generation(Process), 0),
+    NextGeneration = CurrentGeneration + 1,
+    ProcessState = ProcessState0#{generation => NextGeneration},
     %% step hook
     ProcessUpdated = update_process(
-        maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result
+        maps:without([detail, corrupted_by], Process#{status => <<"running">>, current_generation => NextGeneration}),
+        Result
    ),
     Response = response(maps:get(response, Result, undefined)),
-    TaskResult = #{
-        task_id => TaskId,
-        response => term_to_binary(Response),
-        finished_time => erlang:system_time(second),
-        status => <<"finished">>
-    },
+    TaskResult = maybe_add_generation(
+        GenerationFromTask,
+        #{
+            task_id => TaskId,
+            response => term_to_binary(Response),
+            finished_time => erlang:system_time(second),
+            status => <<"finished">>
+        }
+    ),
     SaveResult = prg_worker_sidecar:complete_and_suspend(
         Pid,
         Deadline,
@@ -268,7 +281,7 @@
         NsId,
TaskResult, ProcessUpdated, - Events + ProcessState ), _ = maybe_reply(TaskHeader, Response), case SaveResult of @@ -276,83 +289,14 @@ handle_result( ok = next_task(self()), State#prg_worker_state{process = undefined}; {ok, [ContinuationTask | _]} -> - NewHistory = maps:get(history, Process) ++ Events, ok = continuation_task(self(), create_header(ContinuationTask), ContinuationTask), State#prg_worker_state{ - process = ProcessUpdated#{history => NewHistory, last_event_id => last_event_id(NewHistory)} + process = ProcessUpdated#{state => ProcessState} } end; -%% success repair with corrupted task and undefined action -handle_result( - {ok, #{events := Events} = Result}, - {repair, _} = TaskHeader, - #{task_id := TaskId} = _Task, - Deadline, - #prg_worker_state{ - ns_id = NsId, - ns_opts = #{storage := StorageOpts}, - process = #{corrupted_by := ErrorTaskId} = Process, - sidecar_pid = Pid - } = State -) -> - Now = erlang:system_time(second), - %% step hook - ProcessUpdated = update_process( - maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result - ), - Response = response(maps:get(response, Result, undefined)), - TaskResult = #{ - task_id => TaskId, - response => term_to_binary(Response), - finished_time => erlang:system_time(second), - status => <<"finished">> - }, - {ok, ErrorTask} = prg_worker_sidecar:get_task(Pid, Deadline, StorageOpts, NsId, ErrorTaskId), - case ErrorTask of - #{task_type := Type} when Type =:= <<"timeout">>; Type =:= <<"remove">> -> - %% machinegun legacy behaviour - NewTask0 = maps:with( - [process_id, task_type, scheduled_time, args, metadata, context], ErrorTask - ), - NewTask = NewTask0#{ - status => <<"running">>, - running_time => Now, - last_retry_interval => 0, - attempts_count => 0 - }, - {ok, [ContinuationTask | _]} = prg_worker_sidecar:complete_and_continue( - Pid, - Deadline, - StorageOpts, - NsId, - TaskResult, - ProcessUpdated, - Events, - NewTask - ), - _ = maybe_reply(TaskHeader, Response), - NewHistory = maps:get(history, Process) ++ Events, - ok = continuation_task(self(), create_header(ContinuationTask), ContinuationTask), - State#prg_worker_state{ - process = ProcessUpdated#{history => NewHistory, last_event_id => last_event_id(NewHistory)} - }; - _ -> - {ok, []} = prg_worker_sidecar:complete_and_unlock( - Pid, - Deadline, - StorageOpts, - NsId, - TaskResult, - ProcessUpdated, - Events - ), - _ = maybe_reply(TaskHeader, Response), - ok = next_task(self()), - State#prg_worker_state{process = undefined} - end; %% success result with undefined action handle_result( - {ok, #{events := Events} = Result}, + {ok, #{state := ProcessState0} = Result}, TaskHeader, #{task_id := TaskId} = _Task, Deadline, @@ -363,17 +307,25 @@ handle_result( sidecar_pid = Pid } = State ) -> + GenerationFromTask = state_generation(Process), + CurrentGeneration = prg_utils:define(current_generation(Process), 0), + NextGeneration = CurrentGeneration + 1, + ProcessState = ProcessState0#{generation => NextGeneration}, %% step hook ProcessUpdated = update_process( - maps:without([detail, corrupted_by], Process#{status => <<"running">>}), Result + maps:without([detail, corrupted_by], Process#{status => <<"running">>, current_generation => NextGeneration}), + Result ), Response = response(maps:get(response, Result, undefined)), - TaskResult = #{ - task_id => TaskId, - response => term_to_binary(Response), - finished_time => erlang:system_time(second), - status => <<"finished">> - }, + TaskResult = maybe_add_generation( + GenerationFromTask, + #{ + task_id => 
TaskId, + response => term_to_binary(Response), + finished_time => erlang:system_time(second), + status => <<"finished">> + } + ), SaveResult = prg_worker_sidecar:complete_and_unlock( Pid, Deadline, @@ -381,7 +333,7 @@ handle_result( NsId, TaskResult, ProcessUpdated, - Events + ProcessState ), _ = maybe_reply(TaskHeader, Response), case SaveResult of @@ -389,10 +341,9 @@ handle_result( ok = next_task(self()), State#prg_worker_state{process = undefined}; {ok, [ContinuationTask | _]} -> - NewHistory = maps:get(history, Process) ++ Events, ok = continuation_task(self(), create_header(ContinuationTask), ContinuationTask), State#prg_worker_state{ - process = ProcessUpdated#{history => NewHistory, last_event_id => last_event_id(NewHistory)} + process = ProcessUpdated#{state => ProcessState} } end; %% calls processing error @@ -410,9 +361,9 @@ handle_result( ) when TaskType =:= init; TaskType =:= call; - TaskType =:= notify; TaskType =:= repair -> + GenerationFromTask = state_generation(Process), ProcessUpdated = case TaskType of repair -> @@ -420,14 +371,17 @@ handle_result( _ -> Detail = prg_utils:format(Reason), %% step hook - Process#{status => <<"error">>, detail => Detail} + Process#{status => <<"error">>, detail => Detail, corrupted_by => TaskId} end, - TaskResult = #{ - task_id => TaskId, - response => term_to_binary(Response), - finished_time => erlang:system_time(second), - status => <<"error">> - }, + TaskResult = maybe_add_generation( + GenerationFromTask, + #{ + task_id => TaskId, + response => term_to_binary(Response), + finished_time => erlang:system_time(second), + status => <<"error">> + } + ), ok = prg_worker_sidecar:complete_and_error( Pid, Deadline, StorageOpts, NsId, TaskResult, ProcessUpdated ), @@ -447,12 +401,16 @@ handle_result( sidecar_pid = Pid } = State ) when TaskType =:= timeout; TaskType =:= remove -> - TaskResult = #{ - task_id => TaskId, - response => term_to_binary(Response), - finished_time => erlang:system_time(second), - status => <<"error">> - }, + GenerationFromTask = state_generation(Process), + TaskResult = maybe_add_generation( + GenerationFromTask, + #{ + task_id => TaskId, + response => term_to_binary(Response), + finished_time => erlang:system_time(second), + status => <<"error">> + } + ), _ = case check_retryable(TaskHeader, Task, RetryPolicy, Reason) of not_retryable -> @@ -472,7 +430,7 @@ handle_result( NsId, TaskResult, Process, - [], + undefined, NewTask ) end, @@ -549,6 +507,7 @@ is_retryable({exception, _, _}, _TaskHeader, _RetryPolicy, _Timeout, _Attempts) is_retryable(Error, {timeout, undefined}, RetryPolicy, Timeout, Attempts) -> ?TEST_POLICY(Error, RetryPolicy, Timeout, Attempts); is_retryable(_Error, _TaskHeader, _RetryPolicy, _Timeout, _Attempts) -> + %% only timeout/remove task can be retryable false. create_status(Timestamp, Now) when Timestamp =< Now -> @@ -574,8 +533,17 @@ action_to_task_type(#{remove := true}) -> action_to_task_type(#{set_timer := _}) -> <<"timeout">>. -last_event_id([]) -> - 0; -last_event_id(History) -> - [#{event_id := Id} | _] = lists:reverse(History), - Id. +state_generation(#{state := #{generation := Generation}}) -> + Generation; +state_generation(_) -> + undefined. + +current_generation(#{current_generation := CurrentGeneration}) -> + CurrentGeneration; +current_generation(_) -> + undefined. + +maybe_add_generation(undefined, TaskResult) -> + TaskResult; +maybe_add_generation(Gen, TaskResult) -> + TaskResult#{generation => Gen}. 
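
For clarity, the generation protocol repeated across the successful handle_result/5 clauses above can be summarized in one sketch. This helper does not exist in the patch and is illustrative only; state_generation/1, current_generation/1 and maybe_add_generation/2 are the helpers defined at the end of prg_worker.erl, and prg_utils:define/2 is added in this patch.

%% Illustrative sketch only (not part of the patch): the generation
%% bookkeeping shared by each successful handle_result/5 clause.
bump_generation(Process, ProcessState0, TaskResult0) ->
    %% generation of the state the task was executed against; may lag
    %% behind the latest generation when the task was scheduled earlier
    GenerationFromTask = state_generation(Process),
    %% a process that has never produced state starts from generation 0
    CurrentGeneration = prg_utils:define(current_generation(Process), 0),
    NextGeneration = CurrentGeneration + 1,
    %% the new state is always stamped with the next generation
    ProcessState = ProcessState0#{generation => NextGeneration},
    %% the task result carries a generation only when the source state had one
    TaskResult = maybe_add_generation(GenerationFromTask, TaskResult0),
    {NextGeneration, ProcessState, TaskResult}.
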
diff --git a/src/prg_worker_sidecar.erl b/src/prg_worker_sidecar.erl index 50f8e11..90b6615 100644 --- a/src/prg_worker_sidecar.erl +++ b/src/prg_worker_sidecar.erl @@ -59,15 +59,15 @@ process(Pid, Deadline, #{namespace := NS} = NsOpts, {TaskType, _, _} = Request, namespace_id(), task_result(), process(), - [event()], + process_state(), task() ) -> {ok, [task()]} | no_return(). -complete_and_continue(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, Events, Task) -> +complete_and_continue(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, ProcessState, Task) -> %% Timeout = Deadline - erlang:system_time(millisecond), Fun = fun() -> gen_server:call( Pid, - {complete_and_continue, StorageOpts, NsId, TaskResult, Process, Events, Task}, + {complete_and_continue, StorageOpts, NsId, TaskResult, Process, ProcessState, Task}, infinity ) end, @@ -82,14 +82,14 @@ complete_and_continue(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, Ev namespace_id(), task_result(), process(), - [event()] + process_state() ) -> {ok, [task()]} | no_return(). -complete_and_suspend(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, Events) -> +complete_and_suspend(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, ProcessState) -> %% Timeout = Deadline - erlang:system_time(millisecond), Fun = fun() -> gen_server:call( Pid, - {complete_and_suspend, StorageOpts, NsId, TaskResult, Process, Events}, + {complete_and_suspend, StorageOpts, NsId, TaskResult, Process, ProcessState}, infinity ) end, @@ -102,14 +102,14 @@ complete_and_suspend(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, Eve namespace_id(), task_result(), process(), - [event()] + process_state() ) -> {ok, [task()]} | no_return(). -complete_and_unlock(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, Events) -> +complete_and_unlock(Pid, _Deadline, StorageOpts, NsId, TaskResult, Process, ProcessState) -> %% Timeout = Deadline - erlang:system_time(millisecond), Fun = fun() -> gen_server:call( Pid, - {complete_and_unlock, StorageOpts, NsId, TaskResult, Process, Events}, + {complete_and_unlock, StorageOpts, NsId, TaskResult, Process, ProcessState}, infinity ) end, @@ -144,13 +144,13 @@ remove_process(Pid, _Deadline, StorageOpts, NsId, ProcessId) -> {ok, process()} | {error, _Reason}. get_process(Pid, _Deadline, StorageOpts, NsId, ProcessId) -> %% Timeout = Deadline - erlang:system_time(millisecond), - gen_server:call(Pid, {get_process, StorageOpts, NsId, ProcessId, #{}}, infinity). + gen_server:call(Pid, {get_process, StorageOpts, NsId, ProcessId, latest}, infinity). --spec get_process(pid(), timestamp_ms(), storage_opts(), namespace_id(), id(), history_range()) -> +-spec get_process(pid(), timestamp_ms(), storage_opts(), namespace_id(), id(), generation()) -> {ok, process()} | {error, _Reason}. -get_process(Pid, _Deadline, StorageOpts, NsId, ProcessId, HistoryRange) -> +get_process(Pid, _Deadline, StorageOpts, NsId, ProcessId, Generation) -> %% Timeout = Deadline - erlang:system_time(millisecond), - gen_server:call(Pid, {get_process, StorageOpts, NsId, ProcessId, HistoryRange}, infinity). + gen_server:call(Pid, {get_process, StorageOpts, NsId, ProcessId, Generation}, infinity). -spec get_task(pid(), timestamp_ms(), storage_opts(), namespace_id(), task_id()) -> {ok, task()} | {error, _Reason}. 
@@ -195,12 +195,12 @@ handle_call( end, {reply, Response, State}; handle_call( - {complete_and_continue, StorageOpts, NsId, TaskResult, Process, Events, Task}, + {complete_and_continue, StorageOpts, NsId, TaskResult, Process, ProcessState, Task}, _From, #prg_sidecar_state{} = State ) -> Fun = fun() -> - prg_storage:complete_and_continue(StorageOpts, NsId, TaskResult, Process, Events, Task) + prg_storage:complete_and_continue(StorageOpts, NsId, TaskResult, Process, ProcessState, Task) end, Response = do_with_retry(Fun, ?DEFAULT_DELAY), {reply, Response, State}; @@ -235,22 +235,22 @@ handle_call( Response = do_with_retry(Fun, ?DEFAULT_DELAY), {reply, Response, State}; handle_call( - {complete_and_suspend, StorageOpts, NsId, TaskResult, Process, Events}, + {complete_and_suspend, StorageOpts, NsId, TaskResult, Process, ProcessState}, _From, #prg_sidecar_state{} = State ) -> Fun = fun() -> - prg_storage:complete_and_suspend(StorageOpts, NsId, TaskResult, Process, Events) + prg_storage:complete_and_suspend(StorageOpts, NsId, TaskResult, Process, ProcessState) end, Response = do_with_retry(Fun, ?DEFAULT_DELAY), {reply, Response, State}; handle_call( - {complete_and_unlock, StorageOpts, NsId, TaskResult, Process, Events}, + {complete_and_unlock, StorageOpts, NsId, TaskResult, Process, ProcessState}, _From, #prg_sidecar_state{} = State ) -> Fun = fun() -> - prg_storage:complete_and_unlock(StorageOpts, NsId, TaskResult, Process, Events) + prg_storage:complete_and_unlock(StorageOpts, NsId, TaskResult, Process, ProcessState) end, Response = do_with_retry(Fun, ?DEFAULT_DELAY), {reply, Response, State}; diff --git a/src/progressor.erl b/src/progressor.erl index ad7fd61..32e8110 100644 --- a/src/progressor.erl +++ b/src/progressor.erl @@ -11,10 +11,7 @@ -export([repair/1]). -export([simple_repair/1]). -export([get/1]). --export([put/1]). -export([health_check/1]). -%% TODO -%% -export([remove/1]). %% Internal API -export([reply/2]). @@ -29,7 +26,7 @@ args => term(), idempotency_key => binary(), context => binary(), - range => history_range(), + generation => explicit_generation(), options => map() }. @@ -104,16 +101,6 @@ get(Req) -> Req ). --spec put(request()) -> {ok, _Result} | {error, _Reason}. -put(Req) -> - prg_utils:pipe( - [ - fun add_ns_opts/1, - fun do_put/1 - ], - Req - ). - %% Details term must be json compatible for jsx encode/decode -spec health_check([namespace_id()]) -> {Status :: passing | critical, Details :: term()}. health_check(Namespaces) -> @@ -171,18 +158,16 @@ check_idempotency(#{idempotency_key := _IdempotencyKey} = Req) -> check_idempotency(Req) -> Req. -add_task(#{id := Id, type := Type} = Opts) -> - Context = maps:get(context, Opts, <<>>), - Args = maps:get(args, Opts, <<>>), +add_task(#{id := Id, type := Type} = Req) -> TaskData = #{ process_id => Id, - args => Args, + args => maps:get(args, Req, <<>>), task_type => convert_task_type(Type), - context => Context, - metadata => #{range => maps:get(range, Opts, #{})} + context => maps:get(context, Req, <<>>), + idempotency_key => maps:get(idempotency_key, Req, undefined), + generation => maps:get(generation, Req, undefined) }, - Task = make_task(maybe_add_idempotency(TaskData, maps:get(idempotency_key, Opts, undefined))), - Opts#{task => Task}. + Req#{task => make_task(TaskData)}. check_process_status( #{ns_opts := #{storage := StorageOpts}, id := Id, ns := NsId} = Opts, ExpectedStatus @@ -268,42 +253,10 @@ await_task_result(StorageOpts, NsId, KeyOrId, Timeout, Duration) -> ) end. 
-do_get(#{ns_opts := #{storage := StorageOpts}, id := Id, ns := NsId, range := HistoryRange} = Req) -> - prg_storage:get_process(recipient(options(Req)), StorageOpts, NsId, Id, HistoryRange); +do_get(#{ns_opts := #{storage := StorageOpts}, id := Id, ns := NsId, generation := Gen} = Req) -> + prg_storage:get_process(recipient(options(Req)), StorageOpts, NsId, Id, Gen); do_get(#{ns_opts := #{storage := StorageOpts}, id := Id, ns := NsId} = Req) -> - prg_storage:get_process(recipient(options(Req)), StorageOpts, NsId, Id, #{}). - -do_put( - #{ - ns_opts := #{storage := StorageOpts}, - id := Id, - ns := NsId, - args := #{process := Process} = Args - } = Opts -) -> - #{ - process_id := ProcessId - } = Process, - Action = maps:get(action, Args, undefined), - Context = maps:get(context, Opts, <<>>), - Now = erlang:system_time(second), - InitTask = #{ - process_id => ProcessId, - task_type => <<"init">>, - status => <<"finished">>, - args => <<>>, - context => Context, - response => term_to_binary({ok, ok}), - scheduled_time => Now, - running_time => Now, - finished_time => Now, - last_retry_interval => 0, - attempts_count => 0 - }, - ActiveTask = action_to_task(Action, ProcessId, Context), - ProcessData0 = #{process => Process, init_task => InitTask}, - ProcessData = maybe_add_key(ActiveTask, active_task, ProcessData0), - prg_storage:put_process_data(StorageOpts, NsId, Id, ProcessData). + prg_storage:get_process(recipient(options(Req)), StorageOpts, NsId, Id, latest). do_health_check(#{ns := NsId, ns_opts := #{storage := StorageOpts}}) -> try prg_storage:health_check(StorageOpts) of @@ -355,11 +308,6 @@ convert_task_type(timeout) -> convert_task_type(repair) -> <<"repair">>. -maybe_add_idempotency(Task, undefined) -> - Task; -maybe_add_idempotency(Task, IdempotencyKey) -> - Task#{idempotency_key => IdempotencyKey}. - make_task(#{task_type := TaskType} = TaskData) when TaskType =:= <<"init">>; TaskType =:= <<"call">>; @@ -373,7 +321,7 @@ make_task(#{task_type := TaskType} = TaskData) when last_retry_interval => 0, attempts_count => 0 }, - maps:merge(Defaults, TaskData); + maps:filter(fun(_K, V) -> V =/= undefined end, maps:merge(Defaults, TaskData)); make_task(#{task_type := <<"timeout">>} = TaskData) -> Now = erlang:system_time(second), Defaults = #{ @@ -384,18 +332,7 @@ make_task(#{task_type := <<"timeout">>} = TaskData) -> last_retry_interval => 0, attempts_count => 0 }, - maps:merge(Defaults, TaskData); -make_task(#{task_type := <<"notify">>} = TaskData) -> - Now = erlang:system_time(second), - Defaults = #{ - status => <<"running">>, - scheduled_time => Now, - running_time => Now, - response => term_to_binary({ok, ok}), - last_retry_interval => 0, - attempts_count => 0 - }, - maps:merge(Defaults, TaskData). + maps:filter(fun(_K, V) -> V =/= undefined end, maps:merge(Defaults, TaskData)). capture_worker(NsId) -> case prg_scheduler:capture_worker(NsId, self()) of @@ -413,32 +350,6 @@ check_for_run(undefined) -> check_for_run(Pid) when is_pid(Pid) -> <<"running">>. -action_to_task(undefined, _ProcessId, _Ctx) -> - undefined; -action_to_task(unset_timer, _ProcessId, _Ctx) -> - undefined; -action_to_task(#{set_timer := Timestamp} = Action, ProcessId, Context) -> - TaskType = - case maps:get(remove, Action, false) of - true -> <<"remove">>; - false -> <<"timeout">> - end, - #{ - process_id => ProcessId, - task_type => TaskType, - status => <<"waiting">>, - args => <<>>, - context => Context, - scheduled_time => Timestamp, - last_retry_interval => 0, - attempts_count => 0 - }. 
- -maybe_add_key(undefined, _Key, Map) -> - Map; -maybe_add_key(Value, Key, Map) -> - Map#{Key => Value}. - options(#{options := Opts}) -> Opts; options(_) -> diff --git a/src/storage/postgres/prg_pg_backend.erl b/src/storage/postgres/prg_pg_backend.erl index 16004de..081648f 100644 --- a/src/storage/postgres/prg_pg_backend.erl +++ b/src/storage/postgres/prg_pg_backend.erl @@ -12,7 +12,6 @@ -export([prepare_init/4]). -export([prepare_call/4]). -export([prepare_repair/4]). --export([put_process_data/4]). %% scan functions -export([collect_zombies/3]). @@ -83,7 +82,7 @@ get_task(Recipient, PgOpts, NsId, TaskId) -> {ok, _, []} -> {error, not_found}; {ok, Columns, Rows} -> - [Task] = to_maps(Columns, Rows, fun marshal_task/1), + [Task] = to_maps(Columns, Rows, fun prg_pg_utils:marshal_task/1), {ok, Task} end. @@ -103,22 +102,22 @@ get_process_status(PgOpts, NsId, Id) -> [{Status}] -> {ok, Status} end. --spec get_process(recipient(), pg_opts(), namespace_id(), id(), history_range()) -> +-spec get_process(recipient(), pg_opts(), namespace_id(), id(), generation()) -> {ok, process()} | {error, _Reason}. -get_process(external = Recipient, #{cache := _DbRef} = PgOpts, NsId, ProcessId, HistoryRange) -> - case prg_pg_cache:get(NsId, ProcessId, HistoryRange) of +get_process(external = Recipient, #{cache := _DbRef} = PgOpts, NsId, ProcessId, Generation) -> + case prg_pg_cache:get(NsId, ProcessId, Generation) of undefined -> - get_process(Recipient, maps:without([cache], PgOpts), NsId, ProcessId, HistoryRange); + get_process(Recipient, maps:without([cache], PgOpts), NsId, ProcessId, Generation); {ok, _} = Response -> Response end; -get_process(Recipient, PgOpts, NsId, ProcessId, HistoryRange) -> +get_process(Recipient, PgOpts, NsId, ProcessId, Generation) -> Pool = get_pool(Recipient, PgOpts), #{ processes := ProcessesTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsId), - RangeCondition = create_range_condition(HistoryRange), + GenCondition = generation_condition(Generation), RawResult = epg_pool:transaction( Pool, fun(Connection) -> @@ -127,64 +126,23 @@ get_process(Recipient, PgOpts, NsId, ProcessId, HistoryRange) -> {error, <<"process not found">>}; {ok, ColumnsPr, RowsPr} -> {ok, _, _} = - {ok, ColumnstEv, RowsEv} = do_get_events(Connection, EventsTable, ProcessId, RangeCondition), - LastEventId = get_last_event_id(Connection, EventsTable, ProcessId), - {ok, {ColumnsPr, RowsPr}, {ColumnstEv, RowsEv}, LastEventId} + {ok, ColumnstSt, RowsSt} = do_get_state(Connection, GensTable, ProcessId, GenCondition), + {ok, {ColumnsPr, RowsPr}, {ColumnstSt, RowsSt}} end end ), case RawResult of {error, _} = Error -> Error; - {ok, {ProcColumns, ProcRows}, {EventsColumns, EventsRows}, LastEventId} -> - [Process] = to_maps(ProcColumns, ProcRows, fun marshal_process/1), - History = to_maps(EventsColumns, EventsRows, fun marshal_event/1), - {ok, Process#{history => History, last_event_id => LastEventId, range => HistoryRange}} - end. - -%%% - --spec put_process_data( - pg_opts(), - namespace_id(), - id(), - #{process := process(), init_task := task(), active_task => task() | undefined} -) -> - {ok, _Result} | {error, _Reason}. 
-put_process_data(PgOpts, NsId, ProcessId, ProcessData) -> - #{ - process := #{ - history := Events - } = Process, - init_task := InitTask - } = ProcessData, - ActiveTask = maps:get(active_task, ProcessData, undefined), - Pool = get_pool(external, PgOpts), - #{ - processes := ProcessesTable, - tasks := TaskTable, - schedule := ScheduleTable, - events := EventsTable - } = prg_pg_utils:tables(NsId), - epg_pool:transaction( - Pool, - fun(Connection) -> - case do_save_process(Connection, ProcessesTable, Process) of - {ok, _} -> - {ok, _, _, [{InitTaskId}]} = do_save_task(Connection, TaskTable, InitTask), - lists:foreach( - fun(Ev) -> - {ok, _} = do_save_event(Connection, EventsTable, ProcessId, InitTaskId, Ev) - end, - Events - ), - ok = maybe_schedule_task(Connection, TaskTable, ScheduleTable, ActiveTask), - {ok, ok}; - {error, #error{codename = unique_violation}} -> - {error, <<"process already exists">>} + {ok, {ProcColumns, ProcRows}, {StateColumns, StateRows}} -> + [Process] = to_maps(ProcColumns, ProcRows, fun prg_pg_utils:marshal_process/1), + case to_maps(StateColumns, StateRows, fun prg_pg_utils:marshal_process_state/1) of + [] -> + {ok, Process}; + [ProcState] -> + {ok, Process#{state => ProcState}} end - end - ). + end. -spec remove_process(pg_opts(), namespace_id(), id()) -> ok | no_return(). remove_process(PgOpts, NsId, ProcessId) -> @@ -194,7 +152,7 @@ remove_process(PgOpts, NsId, ProcessId) -> tasks := TaskTable, schedule := ScheduleTable, running := RunningTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsId), epg_pool:transaction( Pool, @@ -204,8 +162,9 @@ remove_process(PgOpts, NsId, ProcessId) -> {ok, _S} = epg_pool:query(Connection, "DELETE FROM " ++ ScheduleTable ++ " WHERE process_id = $1", [ProcessId]), {ok, _E} = - epg_pool:query(Connection, "DELETE FROM " ++ EventsTable ++ " WHERE process_id = $1", [ProcessId]), - {ok, _T} = epg_pool:query(Connection, "DELETE FROM " ++ TaskTable ++ " WHERE process_id = $1", [ProcessId]), + epg_pool:query(Connection, "DELETE FROM " ++ GensTable ++ " WHERE process_id = $1", [ProcessId]), + {ok, _T} = + epg_pool:query(Connection, "DELETE FROM " ++ TaskTable ++ " WHERE process_id = $1", [ProcessId]), {ok, _P} = epg_pool:query(Connection, "DELETE FROM " ++ ProcessesTable ++ " WHERE process_id = $1", [ProcessId]) end @@ -280,18 +239,18 @@ search_timers(PgOpts, NsId, _Timeout, Limit) -> " ORDER BY scheduled_time ASC LIMIT $3)" " RETURNING" " task_id, process_id, task_type, 'running'::task_status as status, scheduled_time, " - " TO_TIMESTAMP($2, 'YYYY-MM-DD HH24:MI:SS') as running_time, args, metadata, " + " TO_TIMESTAMP($2, 'YYYY-MM-DD HH24:MI:SS') as running_time, args, generation, metadata, " " last_retry_interval, attempts_count, context" " ) " "INSERT INTO " ++ RunningTable ++ " (task_id, process_id, task_type, status, scheduled_time, running_time," - " args, metadata, last_retry_interval, attempts_count, context) " + " args, generation, metadata, last_retry_interval, attempts_count, context) " " SELECT * FROM tasks_for_run RETURNING *", [Now, NowText, Limit] ) end ), - to_maps(Columns, Rows, fun marshal_task/1). + to_maps(Columns, Rows, fun prg_pg_utils:marshal_task/1). -spec search_calls(pg_opts(), namespace_id(), pos_integer()) -> [task()]. 
search_calls(PgOpts, NsId, Limit) ->
@@ -317,18 +276,18 @@ search_calls(PgOpts, NsId, Limit) ->
             " GROUP BY process_id ORDER BY min ASC LIMIT $2"
             " ) "
             " RETURNING task_id, process_id, task_type, 'running'::task_status as status, scheduled_time, "
-            " TO_TIMESTAMP($1, 'YYYY-MM-DD HH24:MI:SS') as running_time, args, metadata, "
+            " TO_TIMESTAMP($1, 'YYYY-MM-DD HH24:MI:SS') as running_time, args, generation, metadata, "
             " last_retry_interval, attempts_count, context"
             " ) "
             "INSERT INTO " ++ RunningTable ++
             " (task_id, process_id, task_type, status, scheduled_time, running_time, args,"
-            " metadata, last_retry_interval, attempts_count, context) "
+            " generation, metadata, last_retry_interval, attempts_count, context) "
             " SELECT * FROM tasks_for_run RETURNING *",
                 [Now, Limit]
             )
         end
     ),
-    to_maps(Columns, Rows, fun marshal_task/1).
+    to_maps(Columns, Rows, fun prg_pg_utils:marshal_task/1).
 
 -spec prepare_init(pg_opts(), namespace_id(), process(), task()) ->
     {ok, {postpone, task_id()} | {continue, task_id()}} | {error, _Reason}.
@@ -435,20 +394,20 @@ prepare_repair(PgOpts, NsId, _ProcessId, #{status := <<"running">>} = Task) ->
         end
     ).
 
--spec complete_and_continue(pg_opts(), namespace_id(), task_result(), process(), [event()], task()) ->
+-spec complete_and_continue(pg_opts(), namespace_id(), task_result(), process(), process_state(), task()) ->
     {ok, [task()]}.
-complete_and_continue(PgOpts, NsId, TaskResult, Process, Events, NextTask) ->
+complete_and_continue(PgOpts, NsId, TaskResult, Process, NextGenerationState, NextTask) ->
     % update completed task and process,
     % cancel blocked and waiting timers,
     % save new timer,
-    % return continuation call if exists
+    % return the continuation call if one exists, or the new timer if it is due to run
     Pool = get_pool(internal, PgOpts),
     #{
         processes := ProcessesTable,
         tasks := TaskTable,
         schedule := ScheduleTable,
         running := RunningTable,
-        events := EventsTable
+        generations := GensTable
     } = prg_pg_utils:tables(NsId),
     #{task_id := TaskId} = TaskResult,
     #{process_id := ProcessId} = Process,
@@ -456,13 +415,7 @@
         Pool,
         fun(Connection) ->
             {ok, _} = do_update_process(Connection, ProcessesTable, Process),
-            %% TODO implement via batch execute
-            lists:foreach(
-                fun(Ev) ->
-                    {ok, _} = do_save_event(Connection, EventsTable, ProcessId, TaskId, Ev)
-                end,
-                Events
-            ),
+            {ok, _} = do_save_generation(Connection, GensTable, ProcessId, TaskId, NextGenerationState),
             {ok, _, _} = do_cancel_timer(Connection, TaskTable, ScheduleTable, ProcessId),
             {ok, _, _, [{NextTaskId}]} = do_save_task(Connection, TaskTable, NextTask),
             case
@@ -499,11 +452,11 @@
             end
         end
     ),
-    {ok, to_maps(Columns, Rows, fun marshal_task/1)}.
+    {ok, to_maps(Columns, Rows, fun prg_pg_utils:marshal_task/1)}.
 
--spec complete_and_suspend(pg_opts(), namespace_id(), task_result(), process(), [event()]) ->
+-spec complete_and_suspend(pg_opts(), namespace_id(), task_result(), process(), process_state()) ->
     {ok, [task()]}.
-complete_and_suspend(PgOpts, NsId, TaskResult, Process, Events) -> +complete_and_suspend(PgOpts, NsId, TaskResult, Process, NextGenerationState) -> % update completed task and process, cancel blocked and waiting timers Pool = get_pool(internal, PgOpts), #{ @@ -511,7 +464,7 @@ complete_and_suspend(PgOpts, NsId, TaskResult, Process, Events) -> tasks := TaskTable, schedule := ScheduleTable, running := RunningTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsId), #{task_id := TaskId} = TaskResult, #{process_id := ProcessId} = Process, @@ -519,17 +472,12 @@ complete_and_suspend(PgOpts, NsId, TaskResult, Process, Events) -> Pool, fun(Connection) -> {ok, _} = do_update_process(Connection, ProcessesTable, Process), - lists:foreach( - fun(Ev) -> - {ok, _} = do_save_event(Connection, EventsTable, ProcessId, TaskId, Ev) - end, - Events - ), + {ok, _} = do_save_generation(Connection, GensTable, ProcessId, TaskId, NextGenerationState), {ok, _, _} = do_cancel_timer(Connection, TaskTable, ScheduleTable, ProcessId), do_complete_task(Connection, TaskTable, ScheduleTable, RunningTable, TaskResult#{process_id => ProcessId}) end ), - {ok, to_maps(Columns, Rows, fun marshal_task/1)}. + {ok, to_maps(Columns, Rows, fun prg_pg_utils:marshal_task/1)}. -spec complete_and_error(pg_opts(), namespace_id(), task_result(), process()) -> ok. complete_and_error(PgOpts, NsId, TaskResult, Process) -> @@ -558,9 +506,9 @@ complete_and_error(PgOpts, NsId, TaskResult, Process) -> ), ok. --spec complete_and_unlock(pg_opts(), namespace_id(), task_result(), process(), [event()]) -> +-spec complete_and_unlock(pg_opts(), namespace_id(), task_result(), process(), process_state()) -> {ok, [task()]}. -complete_and_unlock(PgOpts, NsId, TaskResult, Process, Events) -> +complete_and_unlock(PgOpts, NsId, TaskResult, Process, NextGenerationState) -> % update completed task and process, unlock blocked task Pool = get_pool(internal, PgOpts), #{ @@ -568,7 +516,7 @@ complete_and_unlock(PgOpts, NsId, TaskResult, Process, Events) -> tasks := TaskTable, schedule := ScheduleTable, running := RunningTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsId), #{task_id := TaskId} = TaskResult, #{process_id := ProcessId} = Process, @@ -576,12 +524,7 @@ complete_and_unlock(PgOpts, NsId, TaskResult, Process, Events) -> Pool, fun(Connection) -> {ok, _} = do_update_process(Connection, ProcessesTable, Process), - lists:foreach( - fun(Ev) -> - {ok, _} = do_save_event(Connection, EventsTable, ProcessId, TaskId, Ev) - end, - Events - ), + {ok, _} = do_save_generation(Connection, GensTable, ProcessId, TaskId, NextGenerationState), Completion = do_complete_task( Connection, TaskTable, @@ -600,7 +543,7 @@ complete_and_unlock(PgOpts, NsId, TaskResult, Process, Events) -> Completion end ), - {ok, to_maps(Columns, Rows, fun marshal_task/1)}. + {ok, to_maps(Columns, Rows, fun prg_pg_utils:marshal_task/1)}. -spec db_init(pg_opts(), namespace_id()) -> ok. db_init(PgOpts, NsId) -> @@ -616,58 +559,35 @@ cleanup(PgOpts, NsId) -> %% Internal functions -create_range_condition(Range) -> - after_id(Range) ++ direction(Range) ++ limit(Range). - -after_id(#{offset := After} = Range) -> - Direction = maps:get(direction, Range, forward), - " AND event_id " ++ operator(Direction) ++ integer_to_list(After) ++ " "; -after_id(_) -> - " ". - -operator(forward) -> - " > "; -operator(backward) -> - " < ". 
+generation_condition(latest) -> + " ORDER BY generation DESC LIMIT 1"; +generation_condition(Generation) -> + " AND generation = " ++ integer_to_list(Generation) ++ " ". -limit(#{limit := Limit}) -> - " LIMIT " ++ integer_to_list(Limit) ++ " "; -limit(_) -> - " ". - -direction(#{direction := backward}) -> - " ORDER BY event_id DESC "; -direction(_) -> - " ORDER BY event_id ASC ". - -do_get_process(Connection, Table, ProcessId) -> +do_get_state(Connection, GensTable, ProcessId, GenCondition) -> + SQL = "SELECT * FROM " ++ GensTable ++ " WHERE process_id = $1 " ++ GenCondition, epg_pool:query( Connection, - "SELECT * from " ++ Table ++ " WHERE process_id = $1", + SQL, [ProcessId] ). -do_get_events(Connection, EventsTable, ProcessId, RangeCondition) -> - SQL = "SELECT * FROM " ++ EventsTable ++ " WHERE process_id = $1 " ++ RangeCondition, +do_get_process(Connection, Table, ProcessId) -> epg_pool:query( Connection, - SQL, + "SELECT * from " ++ Table ++ " WHERE process_id = $1", [ProcessId] ). -get_last_event_id(Connection, EventsTable, ProcessId) -> - SQL = "SELECT max(event_id) FROM " ++ EventsTable ++ " WHERE process_id = $1", - Result = epg_pool:query( - Connection, - SQL, - [ProcessId] - ), - case Result of - {ok, _, [{null}]} -> - 0; - {ok, _, [{Value}]} -> - Value - end. +%define_generation(Connection, Table, ProcessId, latest) -> +% {ok, _Columns, [{Generation}]} = epg_pool:query( +% Connection, +% "SELECT current_generation from " ++ Table ++ " WHERE process_id = $1", +% [ProcessId] +% ), +% Generation; +%define_generation(_Connection, _Table, _ProcessId, Generation) -> +% Generation. do_save_process(Connection, Table, Process) -> #{ @@ -677,23 +597,18 @@ do_save_process(Connection, Table, Process) -> Detail = maps:get(detail, Process, null), AuxState = maps:get(aux_state, Process, null), Meta = maps:get(metadata, Process, null), + Generation = maps:get(current_generation, Process, null), epg_pool:query( Connection, - "INSERT INTO " ++ Table ++ " (process_id, status, detail, aux_state, metadata) VALUES ($1, $2, $3, $4, $5)", - [ProcessId, Status, Detail, AuxState, json_encode(Meta)] + "INSERT INTO " ++ Table ++ " (process_id, status, detail, aux_state, metadata, current_generation)" ++ + " VALUES ($1, $2, $3, $4, $5, $6)", + [ProcessId, Status, Detail, AuxState, json_encode(Meta), Generation] ). -maybe_schedule_task(_Connection, _TaskTable, _ScheduleTable, undefined) -> - ok; -maybe_schedule_task(Connection, TaskTable, ScheduleTable, Task) -> - {ok, _, _, [{TaskId}]} = do_save_task(Connection, TaskTable, Task), - {ok, _, _, _} = do_save_schedule(Connection, ScheduleTable, Task#{task_id => TaskId}), - ok. - -do_save_task(Connection, Table, Task) -> - do_save_task(Connection, Table, Task, " task_id "). +do_save_task(Connection, TaskTable, Task) -> + do_save_task(Connection, TaskTable, Task, " task_id "). 
-do_save_task(Connection, Table, Task, Returning) -> +do_save_task(Connection, TaskTable, Task, Returning) -> #{ process_id := ProcessId, task_type := TaskType, @@ -702,6 +617,7 @@ do_save_task(Connection, Table, Task, Returning) -> attempts_count := AttemptsCount } = Task, Args = maps:get(args, Task, null), + Generation = maps:get(generation, Task, null), MetaData = maps:get(metadata, Task, null), IdempotencyKey = maps:get(idempotency_key, Task, null), BlockedTask = maps:get(blocked_task, Task, null), @@ -712,11 +628,11 @@ do_save_task(Connection, Table, Task, Returning) -> Context = maps:get(context, Task, <<>>), epg_pool:query( Connection, - "INSERT INTO " ++ Table ++ + "INSERT INTO " ++ TaskTable ++ " " - " (process_id, task_type, status, scheduled_time, running_time, finished_time, args, " + " (process_id, task_type, status, scheduled_time, running_time, finished_time, args, generation," " metadata, idempotency_key, blocked_task, response, last_retry_interval, attempts_count, context)" - "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) RETURNING " ++ Returning, + "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) RETURNING " ++ Returning, [ ProcessId, TaskType, @@ -725,6 +641,7 @@ do_save_task(Connection, Table, Task, Returning) -> RunningTs, FinishedTs, Args, + Generation, json_encode(MetaData), IdempotencyKey, BlockedTask, @@ -734,7 +651,6 @@ do_save_task(Connection, Table, Task, Returning) -> Context ] ). -%% do_save_running(Connection, Table, Task) -> do_save_running(Connection, Table, Task, " task_id "). @@ -750,6 +666,7 @@ do_save_running(Connection, Table, Task, Returning) -> attempts_count := AttemptsCount } = Task, Args = maps:get(args, Task, null), + Generation = maps:get(generation, Task, null), MetaData = maps:get(metadata, Task, null), RunningTs = erlang:system_time(second), Context = maps:get(context, Task, <<>>), @@ -758,8 +675,8 @@ do_save_running(Connection, Table, Task, Returning) -> "INSERT INTO " ++ Table ++ " " " (task_id, process_id, task_type, status, scheduled_time, running_time, " - " args, metadata, last_retry_interval, attempts_count, context)" - "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) " + " args, generation, metadata, last_retry_interval, attempts_count, context)" + "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) " " ON CONFLICT (process_id) DO NOTHING RETURNING " ++ Returning, [ TaskId, @@ -769,6 +686,7 @@ do_save_running(Connection, Table, Task, Returning) -> unixtime_to_datetime(ScheduledTs), unixtime_to_datetime(RunningTs), Args, + Generation, json_encode(MetaData), LastRetryInterval, AttemptsCount, @@ -790,15 +708,16 @@ do_save_schedule(Connection, Table, Task, Returning) -> attempts_count := AttemptsCount } = Task, Args = maps:get(args, Task, null), + Generation = maps:get(generation, Task, null), MetaData = maps:get(metadata, Task, null), Context = maps:get(context, Task, <<>>), epg_pool:query( Connection, "INSERT INTO " ++ Table ++ " " - " (task_id, process_id, task_type, status, scheduled_time, args," + " (task_id, process_id, task_type, status, scheduled_time, args, generation, " " metadata, last_retry_interval, attempts_count, context)" - "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING " ++ Returning, + "VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING " ++ Returning, [ TaskId, ProcessId, @@ -806,6 +725,7 @@ do_save_schedule(Connection, Table, Task, Returning) -> Status, unixtime_to_datetime(ScheduledTs), Args, + Generation, json_encode(MetaData), 
LastRetryInterval, AttemptsCount, @@ -836,6 +756,7 @@ do_get_task(Connection, Table, TaskId) -> do_update_process(Connection, ProcessesTable, Process) -> #{ process_id := ProcessId, + current_generation := Generation, status := Status } = Process, Detail = maps:get(detail, Process, null), @@ -845,24 +766,26 @@ do_update_process(Connection, ProcessesTable, Process) -> epg_pool:query( Connection, "UPDATE " ++ ProcessesTable ++ - " SET status = $1, detail = $2, aux_state = $3, metadata = $4, corrupted_by = $5 " - "WHERE process_id = $6", - [Status, Detail, AuxState, json_encode(MetaData), CorruptedBy, ProcessId] + " SET status = $1, detail = $2, aux_state = $3, metadata = $4, corrupted_by = $5, current_generation = $6 " + "WHERE process_id = $7", + [Status, Detail, AuxState, json_encode(MetaData), CorruptedBy, Generation, ProcessId] ). -do_save_event(Connection, EventsTable, ProcessId, TaskId, Event) -> +do_save_generation(_Connection, _GensTable, _ProcessId, _TaskId, undefined) -> + {ok, 0}; +do_save_generation(Connection, GensTable, ProcessId, TaskId, ProcessState) -> #{ - event_id := EventId, - timestamp := EventTs, + generation := Generation, + timestamp := GenTs, payload := Payload - } = Event, - MetaData = maps:get(metadata, Event, null), + } = ProcessState, + MetaData = maps:get(metadata, ProcessState, null), epg_pool:query( Connection, - "INSERT INTO " ++ EventsTable ++ - " (process_id, task_id, event_id, timestamp, payload, metadata) " + "INSERT INTO " ++ GensTable ++ + " (process_id, task_id, generation, timestamp, payload, metadata) " "VALUES ($1, $2, $3, $4, $5, $6)", - [ProcessId, TaskId, EventId, unixtime_to_datetime(EventTs), Payload, json_encode(MetaData)] + [ProcessId, TaskId, Generation, unixtime_to_datetime(GenTs), Payload, json_encode(MetaData)] ). 
do_complete_task(Connection, TaskTable, ScheduleTable, RunningTable, TaskResult) -> @@ -871,16 +794,16 @@ do_complete_task(Connection, TaskTable, ScheduleTable, RunningTable, TaskResult) process_id := ProcessId, status := Status } = TaskResult, + Generation = maps:get(generation, TaskResult, null), Response = maps:get(response, TaskResult, null), FinishedTime = maps:get(finished_time, TaskResult, erlang:system_time(second)), {ok, _} = epg_pool:query( Connection, - "WITH deleted AS(" - " DELETE FROM " ++ RunningTable ++ - " WHERE process_id = $4" - " )" - "UPDATE " ++ TaskTable ++ " SET status = $1, response = $2, finished_time = $3 WHERE task_id = $5", - [Status, Response, unixtime_to_datetime(FinishedTime), ProcessId, TaskId] + "WITH deleted AS (DELETE FROM " ++ RunningTable ++ + " WHERE process_id = $1)" + "UPDATE " ++ TaskTable ++ + " SET status = $2, response = $3, finished_time = $4, generation = $5 WHERE task_id = $6", + [ProcessId, Status, Response, unixtime_to_datetime(FinishedTime), Generation, TaskId] ), case Status of <<"error">> -> @@ -897,12 +820,12 @@ do_complete_task(Connection, TaskTable, ScheduleTable, RunningTable, TaskResult) " (SELECT min(task_id) FROM " ++ ScheduleTable ++ " WHERE process_id = $1 AND status = 'waiting' AND task_type IN ('call', 'repair')) " " RETURNING task_id, process_id, task_type, 'running'::task_status as status, scheduled_time, " - " TO_TIMESTAMP($2, 'YYYY-MM-DD HH24:MI:SS') as running_time, args, metadata, " + " TO_TIMESTAMP($2, 'YYYY-MM-DD HH24:MI:SS') as running_time, args, generation, metadata, " " last_retry_interval, attempts_count, context" " ) " "INSERT INTO " ++ RunningTable ++ " (task_id, process_id, task_type, status, scheduled_time, running_time, args, " - " metadata, last_retry_interval, attempts_count, context)" + " generation, metadata, last_retry_interval, attempts_count, context)" " SELECT * FROM postponed_tasks RETURNING *", [ProcessId, RunningTime] ) @@ -1035,65 +958,6 @@ json_encode(null) -> json_encode(MetaData) -> jsx:encode(MetaData). -%% Marshalling - -marshal_task(Task) -> - maps:fold( - fun - (_, null, Acc) -> Acc; - (<<"task_id">>, TaskId, Acc) -> Acc#{task_id => TaskId}; - (<<"process_id">>, ProcessId, Acc) -> Acc#{process_id => ProcessId}; - (<<"task_type">>, TaskType, Acc) -> Acc#{task_type => TaskType}; - (<<"status">>, Status, Acc) -> Acc#{status => Status}; - (<<"scheduled_time">>, Ts, Acc) -> Acc#{scheduled_time => Ts}; - (<<"running_time">>, Ts, Acc) -> Acc#{running_time => Ts}; - (<<"args">>, Args, Acc) -> Acc#{args => Args}; - (<<"metadata">>, MetaData, Acc) -> Acc#{metadata => MetaData}; - (<<"idempotency_key">>, IdempotencyKey, Acc) -> Acc#{idempotency_key => IdempotencyKey}; - (<<"response">>, Response, Acc) -> Acc#{response => Response}; - (<<"blocked_task">>, BlockedTaskId, Acc) -> Acc#{blocked_task => BlockedTaskId}; - (<<"last_retry_interval">>, LastRetryInterval, Acc) -> Acc#{last_retry_interval => LastRetryInterval}; - (<<"attempts_count">>, AttemptsCount, Acc) -> Acc#{attempts_count => AttemptsCount}; - (<<"context">>, Context, Acc) -> Acc#{context => Context}; - (_, _, Acc) -> Acc - end, - #{}, - Task - ). 
- -marshal_process(Process) -> - maps:fold( - fun - (_, null, Acc) -> Acc; - (<<"process_id">>, ProcessId, Acc) -> Acc#{process_id => ProcessId}; - (<<"status">>, Status, Acc) -> Acc#{status => Status}; - (<<"detail">>, Detail, Acc) -> Acc#{detail => Detail}; - (<<"aux_state">>, AuxState, Acc) -> Acc#{aux_state => AuxState}; - (<<"metadata">>, Meta, Acc) -> Acc#{metadata => Meta}; - (<<"corrupted_by">>, CorruptedBy, Acc) -> Acc#{corrupted_by => CorruptedBy}; - (_, _, Acc) -> Acc - end, - #{}, - Process - ). - -marshal_event(Event) -> - maps:fold( - fun - (_, null, Acc) -> Acc; - (<<"process_id">>, ProcessId, Acc) -> Acc#{process_id => ProcessId}; - (<<"task_id">>, TaskId, Acc) -> Acc#{task_id => TaskId}; - (<<"event_id">>, EventId, Acc) -> Acc#{event_id => EventId}; - (<<"timestamp">>, Ts, Acc) -> Acc#{timestamp => Ts}; - (<<"metadata">>, MetaData, Acc) -> Acc#{metadata => MetaData}; - (<<"payload">>, Payload, Acc) -> Acc#{payload => Payload}; - (_, _, Acc) -> Acc - end, - #{}, - Event - ). -%% - get_pool(internal, #{pool := Pool}) -> Pool; get_pool(external, #{pool := BasePool} = PgOpts) -> diff --git a/src/storage/postgres/prg_pg_cache.erl b/src/storage/postgres/prg_pg_cache.erl index b14845f..ea2c8bd 100644 --- a/src/storage/postgres/prg_pg_cache.erl +++ b/src/storage/postgres/prg_pg_cache.erl @@ -61,9 +61,9 @@ start(Replications) -> Replications ). --spec get(namespace_id(), id(), history_range()) -> {ok, _Result} | undefined. -get(NsID, ProcessID, HistoryRange) -> - do_get(NsID, ProcessID, HistoryRange). +-spec get(namespace_id(), id(), generation()) -> {ok, _Result} | undefined. +get(NsID, ProcessID, Generation) -> + do_get(NsID, ProcessID, Generation). -spec start_link( CacheRef :: atom(), @@ -175,7 +175,7 @@ create_publication_if_not_exists(Connection, NsID) -> PubNameEscaped = "\"" ++ PubName ++ "\"", #{ processes := ProcessesTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsID), %% TODO must be transaction (race condition) {ok, _, [{IsPublicationExists}]} = epgsql:equery( @@ -190,7 +190,7 @@ create_publication_if_not_exists(Connection, NsID) -> {ok, _, _} = epgsql:equery( Connection, "CREATE PUBLICATION " ++ PubNameEscaped ++ - " FOR TABLE " ++ ProcessesTable ++ " , " ++ EventsTable + " FOR TABLE " ++ ProcessesTable ++ " , " ++ GensTable ), {ok, PubName} end. @@ -200,11 +200,11 @@ create_tables(NsIDs) -> fun(NsID, Acc) -> #{ processes := ProcessesTable, - events := EventsTable + generations := GensTable } = tables(NsID), ProcessesETS = ets:new(ProcessesTable, [named_table]), - EventsETS = ets:new(EventsTable, [named_table, bag]), - [ProcessesETS, EventsETS | Acc] + GensETS = ets:new(GensTable, [named_table, bag]), + [ProcessesETS, GensETS | Acc] end, [], NsIDs @@ -219,23 +219,23 @@ cleanup_tables(#{tables := Tables}) -> cleanup_tables(_) -> ok. 
-do_get(NsID, ProcessID, HistoryRange) -> +do_get(NsID, ProcessID, Generation) -> #{ processes := ProcessesTable, - events := EventsTable + generations := GensTable } = tables(NsID), - ProcessResult = ets:lookup(ProcessesTable, ProcessID), - case ProcessResult of + case ets:lookup(ProcessesTable, ProcessID) of [] -> undefined; [{_, ProcessRaw}] -> - Process = prg_pg_utils:marshal_process(ProcessRaw), - {EventsRaw, LastEventID} = process_by_range(ets:lookup(EventsTable, ProcessID), HistoryRange), - Events = lists:map( - fun(Ev) -> prg_pg_utils:marshal_event(convert_event(Ev)) end, - EventsRaw - ), - {ok, Process#{history => Events, last_event_id => LastEventID, range => HistoryRange}} + case get_process_state(ets:lookup(GensTable, ProcessID), Generation) of + undefined -> + undefined; + ProcessStateRaw -> + Process = prg_pg_utils:marshal_process(ProcessRaw), + ProcessState = prg_pg_utils:marshal_process_state(convert_state(ProcessStateRaw)), + {ok, Process#{state => ProcessState}} + end end. process_operation({Table, _, _} = ReplData, State) -> @@ -246,34 +246,34 @@ process_operation(<<"processes">>, NsID, {_Table, insert, #{<<"process_id">> := %% process created #{ processes := ProcessesTable, - events := EventsTable + generations := GensTable } = tables(NsID), true = ets:insert(ProcessesTable, {ProcessID, Row}), - reset_timer([ProcessesTable, EventsTable], ProcessID, State); -process_operation(<<"events">>, NsID, {_Table, insert, #{<<"process_id">> := ProcessID} = Row}, State) -> + reset_timer([ProcessesTable, GensTable], ProcessID, State); +process_operation(<<"generations">>, NsID, {_Table, insert, #{<<"process_id">> := ProcessID} = Row}, State) -> #{ processes := ProcessesTable, - events := EventsTable + generations := GensTable } = tables(NsID), case ets:lookup(ProcessesTable, ProcessID) of [_Process] -> - %% known cached process, save events - true = ets:insert(EventsTable, {ProcessID, Row}), - reset_timer([ProcessesTable, EventsTable], ProcessID, State); + %% known cached process, save next generation state + true = ets:insert(GensTable, {ProcessID, Row}), + reset_timer([ProcessesTable, GensTable], ProcessID, State); [] -> %% old process, not cached, ignore State end; -process_operation(_, NsID, {Table, update, #{<<"process_id">> := ProcessID} = Row}, State) -> - TableETS = binary_to_atom(Table), - case ets:lookup(TableETS, ProcessID) of +%% update operation is not applicable for generations table (append only) +process_operation(<<"processes">>, NsID, {_Table, update, #{<<"process_id">> := ProcessID} = Row}, State) -> + #{ + processes := ProcessesTable, + generations := GensTable + } = tables(NsID), + case ets:lookup(ProcessesTable, ProcessID) of [{_, OldRow}] -> - #{ - processes := ProcessesTable, - events := EventsTable - } = tables(NsID), - true = ets:insert(TableETS, {ProcessID, maps:merge(OldRow, Row)}), - reset_timer([ProcessesTable, EventsTable], ProcessID, State); + true = ets:insert(ProcessesTable, {ProcessID, maps:merge(OldRow, Row)}), + reset_timer([ProcessesTable, GensTable], ProcessID, State); [] -> State end; @@ -302,69 +302,29 @@ tables(NsID) -> NsStr = atom_to_list(NsID), #{ processes => list_to_atom(NsStr ++ "_processes"), - events => list_to_atom(NsStr ++ "_events") + generations => list_to_atom(NsStr ++ "_generations") }. 
-process_by_range([], _) -> - {[], 0}; -process_by_range(Events, #{direction := backward, offset := After} = Range) -> - [{_, #{<<"event_id">> := LastEventID}} | _] = - Reversed = lists:sort( - fun({_, #{<<"event_id">> := EventID1}}, {_, #{<<"event_id">> := EventID2}}) -> EventID1 > EventID2 end, - Events - ), - Limit = maps:get(limit, Range, erlang:length(Events)), - Filtered = lists:filtermap( - fun - ({_, #{<<"event_id">> := EvID} = Ev}) when EvID < After -> {true, Ev}; - (_) -> false - end, - Reversed - ), - {lists:sublist(Filtered, Limit), LastEventID}; -process_by_range(Events, #{direction := backward} = Range) -> - [{_, #{<<"event_id">> := LastEventID}} | _] = - Reversed = lists:sort( - fun({_, #{<<"event_id">> := EventID1}}, {_, #{<<"event_id">> := EventID2}}) -> EventID1 > EventID2 end, - Events - ), - Limit = maps:get(limit, Range, erlang:length(Events)), - History = lists:map( - fun({_, Ev}) -> Ev end, - lists:sublist(Reversed, Limit) - ), - {History, LastEventID}; -process_by_range(Events, #{offset := After} = Range) -> - [{_, #{<<"event_id">> := LastEventID}} | _] = - Sorted = lists:sort( - fun({_, #{<<"event_id">> := EventID1}}, {_, #{<<"event_id">> := EventID2}}) -> EventID1 < EventID2 end, - Events - ), - Limit = maps:get(limit, Range, erlang:length(Events)), - Filtered = lists:filtermap( - fun - ({_, #{<<"event_id">> := EvID} = Ev}) when EvID > After -> {true, Ev}; - (_) -> false - end, - Sorted - ), - {lists:sublist(Filtered, Limit), LastEventID}; -process_by_range(Events, Range) -> - [{_, #{<<"event_id">> := LastEventID}} | _] = - Sorted = lists:sort( - fun({_, #{<<"event_id">> := EventID1}}, {_, #{<<"event_id">> := EventID2}}) -> EventID1 < EventID2 end, - Events - ), - History = lists:map( - fun({_, Ev}) -> Ev end, - Sorted +get_process_state([], _Generation) -> + undefined; +get_process_state(Generations, latest) -> + [{_, ProcessState} | _] = lists:sort( + fun({_, #{<<"generation">> := GenA}}, {_, #{<<"generation">> := GenB}}) -> GenA > GenB end, + Generations ), - Limit = maps:get(limit, Range, erlang:length(History)), - {lists:sublist(History, Limit), LastEventID}. - -convert_event(#{<<"timestamp">> := null} = Event) -> - Event; -convert_event(#{<<"timestamp">> := DateTime} = Event) -> - Event#{<<"timestamp">> => prg_pg_utils:convert(timestamp, DateTime)}; -convert_event(Event) -> - Event. + ProcessState; +get_process_state(Generations, Gen) -> + Result = lists:search(fun({_, #{<<"generation">> := G}}) -> G =:= Gen end, Generations), + case Result of + {value, {_, ProcessState}} -> + ProcessState; + false -> + undefined + end. + +convert_state(#{<<"timestamp">> := null} = State) -> + State; +convert_state(#{<<"timestamp">> := DateTime} = State) -> + State#{<<"timestamp">> => prg_pg_utils:convert(timestamp, DateTime)}; +convert_state(State) -> + State. 
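
For reference, a usage sketch of the cache lookup above. The data is hypothetical and resolve_example/0 is illustrative, not part of the patch; the row tuples mirror the {ProcessID, Row} entries the replication handler inserts into the ETS bag table.

%% Sketch: how get_process_state/2 resolves a generation from cached rows.
resolve_example() ->
    Rows = [
        {<<"p1">>, #{<<"generation">> => 0, <<"payload">> => <<"s0">>}},
        {<<"p1">>, #{<<"generation">> => 1, <<"payload">> => <<"s1">>}}
    ],
    %% latest wins by sorting generations in descending order
    #{<<"generation">> := 1} = get_process_state(Rows, latest),
    %% an explicit generation is an exact match ...
    #{<<"generation">> := 0} = get_process_state(Rows, 0),
    %% ... and an unknown generation (or an empty cache) is a miss
    undefined = get_process_state(Rows, 2),
    undefined = get_process_state([], latest),
    ok.
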
diff --git a/src/storage/postgres/prg_pg_migration.erl b/src/storage/postgres/prg_pg_migration.erl index fa97d98..c1f983e 100644 --- a/src/storage/postgres/prg_pg_migration.erl +++ b/src/storage/postgres/prg_pg_migration.erl @@ -12,7 +12,7 @@ db_init(#{pool := Pool}, NsId) -> tasks := TaskTable, schedule := ScheduleTable, running := RunningTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsId), {ok, _, _} = epg_pool:transaction( Pool, @@ -68,10 +68,11 @@ db_init(#{pool := Pool}, NsId) -> Connection, "CREATE TABLE IF NOT EXISTS " ++ ProcessesTable ++ " (" - "process_id VARCHAR(80) PRIMARY KEY, " + "process_id VARCHAR(256) PRIMARY KEY, " "status process_status NOT NULL, " "detail TEXT, " "aux_state BYTEA, " + "current_generation INTEGER, " "created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), " "metadata JSONB)" ), @@ -81,15 +82,16 @@ db_init(#{pool := Pool}, NsId) -> "CREATE TABLE IF NOT EXISTS " ++ TaskTable ++ " (" "task_id BIGSERIAL PRIMARY KEY, " - "process_id VARCHAR(80) NOT NULL, " + "process_id VARCHAR(256) NOT NULL, " "task_type task_type NOT NULL, " "status task_status NOT NULL, " "scheduled_time TIMESTAMP WITH TIME ZONE NOT NULL, " "running_time TIMESTAMP WITH TIME ZONE, " "finished_time TIMESTAMP WITH TIME ZONE, " "args BYTEA, " + "generation INTEGER, " "metadata JSONB, " - "idempotency_key VARCHAR(80) UNIQUE, " + "idempotency_key VARCHAR(256) UNIQUE, " "response BYTEA, " "blocked_task BIGINT REFERENCES " ++ TaskTable ++ " (task_id), " @@ -102,7 +104,8 @@ db_init(#{pool := Pool}, NsId) -> {ok, _, _} = epg_pool:query( Connection, "ALTER TABLE " ++ ProcessesTable ++ - " ADD COLUMN IF NOT EXISTS corrupted_by BIGINT REFERENCES " ++ TaskTable ++ "(task_id)" + " ADD COLUMN IF NOT EXISTS corrupted_by BIGINT REFERENCES " ++ TaskTable ++ + " (task_id) DEFERRABLE INITIALLY DEFERRED" ), %% create schedule table @@ -111,11 +114,12 @@ db_init(#{pool := Pool}, NsId) -> "CREATE TABLE IF NOT EXISTS " ++ ScheduleTable ++ " (" "task_id BIGINT PRIMARY KEY, " - "process_id VARCHAR(80) NOT NULL, " + "process_id VARCHAR(256) NOT NULL, " "task_type task_type NOT NULL, " "status task_status NOT NULL, " "scheduled_time TIMESTAMP WITH TIME ZONE NOT NULL, " "args BYTEA, " + "generation INTEGER, " "metadata JSONB, " "last_retry_interval INTEGER NOT NULL, " "attempts_count SMALLINT NOT NULL, " @@ -130,13 +134,14 @@ db_init(#{pool := Pool}, NsId) -> Connection, "CREATE TABLE IF NOT EXISTS " ++ RunningTable ++ " (" - "process_id VARCHAR(80) PRIMARY KEY, " + "process_id VARCHAR(256) PRIMARY KEY, " "task_id BIGINT NOT NULL, " "task_type task_type NOT NULL, " "status task_status NOT NULL, " "scheduled_time TIMESTAMP WITH TIME ZONE NOT NULL, " "running_time TIMESTAMP WITH TIME ZONE NOT NULL, " "args BYTEA, " + "generation INTEGER, " "metadata JSONB, " "last_retry_interval INTEGER NOT NULL, " "attempts_count SMALLINT NOT NULL, " @@ -146,26 +151,55 @@ db_init(#{pool := Pool}, NsId) -> "FOREIGN KEY (task_id) REFERENCES " ++ TaskTable ++ " (task_id))" ), - %% create events table + %% create state generations table {ok, _, _} = epg_pool:query( Connection, - "CREATE TABLE IF NOT EXISTS " ++ EventsTable ++ + "CREATE TABLE IF NOT EXISTS " ++ GensTable ++ " (" - "process_id VARCHAR(80) NOT NULL, " + "process_id VARCHAR(256) NOT NULL, " "task_id BIGINT NOT NULL, " - "event_id SMALLINT NOT NULL, " + "generation INTEGER NOT NULL, " "timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(), " "metadata JSONB, " "payload BYTEA NOT NULL, " - "PRIMARY KEY (process_id, event_id), " + "PRIMARY KEY 
(process_id, generation), " "FOREIGN KEY (process_id) REFERENCES " ++ ProcessesTable ++ " (process_id), " "FOREIGN KEY (task_id) REFERENCES " ++ TaskTable ++ " (task_id))" ), + + %% create constraints for generation + %% the IF NOT EXISTS clause is not applicable to ADD CONSTRAINT + %% leaving this here for now + %{ok, _, _} = epg_pool:query( + % Connection, + % "ALTER TABLE " ++ ProcessesTable ++ " ADD CONSTRAINT fk_prc_state_gen " + % " FOREIGN KEY (process_id, current_generation) " + % " REFERENCES " ++ GensTable ++ "(process_id, generation) DEFERRABLE INITIALLY DEFERRED" + %), + %{ok, _, _} = epg_pool:query( + % Connection, + % "ALTER TABLE " ++ TaskTable ++ " ADD CONSTRAINT fk_prc_state_gen " + % " FOREIGN KEY (process_id, generation) " + % " REFERENCES " ++ GensTable ++ "(process_id, generation) DEFERRABLE INITIALLY DEFERRED" + %), + %{ok, _, _} = epg_pool:query( + % Connection, + % "ALTER TABLE " ++ ScheduleTable ++ " ADD CONSTRAINT fk_prc_state_gen " + % " FOREIGN KEY (process_id, generation) " + % " REFERENCES " ++ GensTable ++ "(process_id, generation) DEFERRABLE INITIALLY DEFERRED" + %), + %{ok, _, _} = epg_pool:query( + % Connection, + % "ALTER TABLE " ++ RunningTable ++ " ADD CONSTRAINT fk_prc_state_gen " + % " FOREIGN KEY (process_id, generation) " + % " REFERENCES " ++ GensTable ++ "(process_id, generation) DEFERRABLE INITIALLY DEFERRED" + %), + %% create indexes {ok, _, _} = epg_pool:query( Connection, - "CREATE INDEX IF NOT EXISTS process_idx on " ++ EventsTable ++ " USING HASH (process_id)" + "CREATE INDEX IF NOT EXISTS process_idx on " ++ GensTable ++ " USING HASH (process_id)" ), {ok, _, _} = epg_pool:query( Connection, @@ -181,27 +215,6 @@ db_init(#{pool := Pool}, NsId) -> ), %% MIGRATIONS - %% migrate process_id to varchar 256 - ok = lists:foreach( - fun(T) -> - TableStr = string:replace(T, "\"", "'", all), - {ok, _, [{VarSize}]} = epg_pool:query( - Connection, - "SELECT character_maximum_length FROM information_schema.columns " - "WHERE table_name = " ++ TableStr ++ " AND column_name = 'process_id'" - ), - case VarSize < 256 of - true -> - {ok, _, _} = epg_pool:query( - Connection, - "ALTER TABLE " ++ T ++ "ALTER COLUMN process_id TYPE VARCHAR(256)" - ); - false -> - skip - end - end, - [ProcessesTable, TaskTable, ScheduleTable, RunningTable, EventsTable] - ), {ok, [], []} end ), @@ -214,13 +227,12 @@ cleanup(#{pool := Pool}, NsId) -> tasks := TaskTable, schedule := ScheduleTable, running := RunningTable, - events := EventsTable + generations := GensTable } = prg_pg_utils:tables(NsId), epg_pool:transaction( Pool, fun(Connection) -> - {ok, _, _} = epg_pool:query(Connection, "ALTER TABLE " ++ ProcessesTable ++ " DROP COLUMN corrupted_by"), - {ok, _, _} = epg_pool:query(Connection, "DROP TABLE " ++ EventsTable), + {ok, _, _} = epg_pool:query(Connection, "DROP TABLE " ++ GensTable), {ok, _, _} = epg_pool:query(Connection, "DROP TABLE " ++ RunningTable), {ok, _, _} = epg_pool:query(Connection, "DROP TABLE " ++ ScheduleTable), {ok, _, _} = epg_pool:query(Connection, "DROP TABLE " ++ TaskTable), diff --git a/src/storage/postgres/prg_pg_utils.erl b/src/storage/postgres/prg_pg_utils.erl index e3e37f4..a3c2269 100644 --- a/src/storage/postgres/prg_pg_utils.erl +++ b/src/storage/postgres/prg_pg_utils.erl @@ -4,7 +4,8 @@ -export([tables/1]). -export([marshal_process/1]). --export([marshal_event/1]). +-export([marshal_process_state/1]). +-export([marshal_task/1]). -export([convert/2]). -spec tables(namespace_id()) -> map().
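A note on the commented-out constraints in the migration above: PostgreSQL has no ADD CONSTRAINT IF NOT EXISTS, but the usual workaround is a DO block that swallows the duplicate_object error. Below is a hedged sketch in the same epg_pool style, assuming it runs inside the db_init/2 transaction fun where Connection, ProcessesTable and GensTable are bound; it is an illustration, not part of the patch:

    %% Hypothetical idempotent variant of one commented-out constraint;
    %% re-running it is a no-op once the constraint already exists.
    {ok, _, _} = epg_pool:query(
        Connection,
        "DO $$ BEGIN "
        "ALTER TABLE " ++ ProcessesTable ++ " ADD CONSTRAINT fk_prc_state_gen "
        "FOREIGN KEY (process_id, current_generation) "
        "REFERENCES " ++ GensTable ++ " (process_id, generation) DEFERRABLE INITIALLY DEFERRED; "
        "EXCEPTION WHEN duplicate_object THEN NULL; "
        "END $$"
    ),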
@@ -14,10 +15,10 @@ tables(NsId) -> tasks => construct_table_name(NsId, "_tasks"), schedule => construct_table_name(NsId, "_schedule"), running => construct_table_name(NsId, "_running"), - events => construct_table_name(NsId, "_events") + generations => construct_table_name(NsId, "_generations") }. --spec marshal_process(map()) -> map(). +-spec marshal_process(map()) -> process(). marshal_process(Process) -> maps:fold( fun @@ -27,6 +28,7 @@ marshal_process(Process) -> (<<"detail">>, Detail, Acc) -> Acc#{detail => Detail}; (<<"aux_state">>, AuxState, Acc) -> Acc#{aux_state => AuxState}; (<<"metadata">>, Meta, Acc) -> Acc#{metadata => Meta}; + (<<"current_generation">>, Gen, Acc) -> Acc#{current_generation => Gen}; (<<"corrupted_by">>, CorruptedBy, Acc) -> Acc#{corrupted_by => CorruptedBy}; (_, _, Acc) -> Acc end, @@ -34,21 +36,47 @@ marshal_process(Process) -> Process ). --spec marshal_event(map()) -> map(). -marshal_event(Event) -> +-spec marshal_process_state(map()) -> process_state(). +marshal_process_state(ProcessState) -> maps:fold( fun (_, null, Acc) -> Acc; (<<"process_id">>, ProcessId, Acc) -> Acc#{process_id => ProcessId}; (<<"task_id">>, TaskId, Acc) -> Acc#{task_id => TaskId}; - (<<"event_id">>, EventId, Acc) -> Acc#{event_id => EventId}; + (<<"generation">>, Gen, Acc) -> Acc#{generation => Gen}; (<<"timestamp">>, Ts, Acc) -> Acc#{timestamp => Ts}; (<<"metadata">>, MetaData, Acc) -> Acc#{metadata => MetaData}; (<<"payload">>, Payload, Acc) -> Acc#{payload => Payload}; (_, _, Acc) -> Acc end, #{}, - Event + ProcessState + ). + +-spec marshal_task(map()) -> task(). +marshal_task(Task) -> + maps:fold( + fun + (_, null, Acc) -> Acc; + (<<"task_id">>, TaskId, Acc) -> Acc#{task_id => TaskId}; + (<<"process_id">>, ProcessId, Acc) -> Acc#{process_id => ProcessId}; + (<<"task_type">>, TaskType, Acc) -> Acc#{task_type => TaskType}; + (<<"status">>, Status, Acc) -> Acc#{status => Status}; + (<<"scheduled_time">>, Ts, Acc) -> Acc#{scheduled_time => Ts}; + (<<"running_time">>, Ts, Acc) -> Acc#{running_time => Ts}; + (<<"args">>, Args, Acc) -> Acc#{args => Args}; + (<<"generation">>, Gen, Acc) -> Acc#{generation => Gen}; + (<<"metadata">>, MetaData, Acc) -> Acc#{metadata => MetaData}; + (<<"idempotency_key">>, IdempotencyKey, Acc) -> Acc#{idempotency_key => IdempotencyKey}; + (<<"response">>, Response, Acc) -> Acc#{response => Response}; + (<<"blocked_task">>, BlockedTaskId, Acc) -> Acc#{blocked_task => BlockedTaskId}; + (<<"last_retry_interval">>, LastRetryInterval, Acc) -> Acc#{last_retry_interval => LastRetryInterval}; + (<<"attempts_count">>, AttemptsCount, Acc) -> Acc#{attempts_count => AttemptsCount}; + (<<"context">>, Context, Acc) -> Acc#{context => Context}; + (_, _, Acc) -> Acc + end, + #{}, + Task ). -spec convert(_, _) -> _. diff --git a/test/prg_base_SUITE.erl b/test/prg_base_SUITE.erl index 29d6382..cbb1eaf 100644 --- a/test/prg_base_SUITE.erl +++ b/test/prg_base_SUITE.erl @@ -15,7 +15,7 @@ %% Tests -export([simple_timers_test/1]). -export([simple_call_test/1]). --export([simple_call_with_range_test/1]). +-export([simple_call_with_generation_test/1]). -export([call_replace_timer_test/1]). -export([call_unset_timer_test/1]). -export([postponed_call_test/1]). @@ -27,13 +27,22 @@ -export([repair_after_call_error_test/1]). -export([remove_by_timer_test/1]). -export([remove_without_timer_test/1]). --export([put_process_test/1]). --export([put_process_zombie_test/1]). --export([put_process_with_timeout_test/1]). --export([put_process_with_remove_test/1]). 
-define(NS(C), proplists:get_value(ns_id, C, 'default/default')). -define(AWAIT_TIMEOUT(C), proplists:get_value(repl_timeout, C, 0)). +-define(PROCESS_EXPECTED(Id, StateGeneration, CurrentGeneration, Status), #{ + status := Status, + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := StateGeneration, + payload := _ + }, + process_id := Id, + current_generation := CurrentGeneration +}). init_per_suite(Config) -> Config. @@ -60,7 +69,7 @@ groups() -> {base, [], [ simple_timers_test, simple_call_test, - simple_call_with_range_test, + simple_call_with_generation_test, call_replace_timer_test, call_unset_timer_test, postponed_call_test, @@ -71,11 +80,7 @@ groups() -> error_after_max_retries_test, repair_after_call_error_test, remove_by_timer_test, - remove_without_timer_test, - put_process_test, - put_process_zombie_test, - put_process_with_timeout_test, - put_process_with_remove_test + remove_without_timer_test ]}, {cache, [], [ {group, base} @@ -85,10 +90,10 @@ groups() -> -spec simple_timers_test(_) -> _. simple_timers_test(C) -> %% steps: - %% step aux_state events action - %% 1. init -> aux_state1, [event1], timer 2s - %% 2. timeout -> aux_state2, [event2], timer 0s - %% 3. timeout -> undefined, [], undefined + %% step aux_state state action + %% 1. init -> aux_state1, state1, timer 2s + %% 2. timeout -> aux_state2, state2, timer 0s + %% 3. timeout -> undefined, state3, undefined _ = mock_processor(simple_timers_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), @@ -96,24 +101,19 @@ simple_timers_test(C) -> ExpectedAux = erlang:term_to_binary(<<"aux_state2">>), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - process_id := Id, status := <<"running">>, - aux_state := ExpectedAux, + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 3, + payload := _ + }, metadata := #{<<"k">> := <<"v">>}, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - } - ] + process_id := Id, + aux_state := ExpectedAux, + current_generation := 3 }} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. @@ -121,9 +121,9 @@ simple_timers_test(C) -> -spec simple_call_test(_) -> _. simple_call_test(C) -> %% steps: - %% 1. init -> [event1], timer 2s - %% 2. call -> [event2], undefined (duration 3s) - %% 3. timeout -> [event3], undefined + %% 1. init -> state1, timer 2s + %% 2. call -> state2, undefined (duration 3s) + %% 3. 
timeout -> state3, undefined _ = mock_processor(simple_call_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), @@ -131,92 +131,80 @@ simple_call_test(C) -> 3 = expect_steps_counter(3), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - process_id := Id, status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - }, - #{ - event_id := 3, - metadata := #{<<"format_version">> := 1}, - payload := _Pl3, - timestamp := _Ts3 - } - ] + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 3, + payload := _ + }, + process_id := Id, + current_generation := 3 }} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %% --spec simple_call_with_range_test(_) -> _. -simple_call_with_range_test(C) -> +-spec simple_call_with_generation_test(_) -> _. +simple_call_with_generation_test(C) -> %% steps: - %% 1. init -> [event1, event2, event3, event4], timer 2s - %% 2. call range limit 2 offset 1 -> [event5], timer 0s - %% 2. call range limit 2 offset 5 back -> [event6], timer 0s - %% 3. timeout -> [event7], undefined - _ = mock_processor(simple_call_with_range_test), + %% 1. init -> state1, timer 2s + %% 2. call generation 1 -> state2, timer 0s + %% 3. call generation 1 -> state3, timer 0s + %% 4. timeout -> state4, undefined + _ = mock_processor(simple_call_with_generation_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), {ok, <<"response">>} = progressor:call(#{ ns => ?NS(C), id => Id, args => <<"call_args">>, - range => #{offset => 1, limit => 2} + generation => 1 }), {ok, <<"response">>} = progressor:call(#{ ns => ?NS(C), id => Id, args => <<"call_args_back">>, - range => #{offset => 5, limit => 2, direction => backward} + generation => 1 }), 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - process_id := Id, status := <<"running">>, - history := [ - #{event_id := 1}, - #{event_id := 2}, - #{event_id := 3}, - #{event_id := 4}, - #{event_id := 5}, - #{event_id := 6}, - #{event_id := 7} - ] + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 4, + payload := _ + }, + process_id := Id, + current_generation := 4 }} = progressor:get(#{ns => ?NS(C), id => Id}), {ok, #{ - process_id := Id, status := <<"running">>, - range := #{offset := 6, direction := backward}, - last_event_id := 7, - history := [ - #{event_id := 5}, - #{event_id := 4}, - #{event_id := 3}, - #{event_id := 2}, - #{event_id := 1} - ] - }} = progressor:get(#{ns => ?NS(C), id => Id, range => #{offset => 6, direction => backward}}), + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 2, + payload := _ + }, + process_id := Id, + current_generation := 4 + }} = progressor:get(#{ns => ?NS(C), id => Id, generation => 2}), unmock_processor(), ok. %% - -spec call_replace_timer_test(_) -> _. call_replace_timer_test(C) -> %% steps: - %% 1. init -> [event1], timer 2s + remove - %% 2. call -> [], timer 0s (new timer cancel remove) - %% 3. timeout -> [event2], undefined + %% 1. init -> state1, timer 2s + remove + %% 2. call -> state2, timer 0s (new timer cancels remove) + %% 3. 
timeout -> state3, undefined _ = mock_processor(call_replace_timer_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), @@ -225,22 +213,17 @@ call_replace_timer_test(C) -> %% wait task_scan_timeout, maybe remove works timer:sleep(4000), {ok, #{ - process_id := Id, status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - } - ] + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 3, + payload := _ + }, + process_id := Id, + current_generation := 3 }} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. @@ -248,7 +231,7 @@ call_replace_timer_test(C) -> -spec call_unset_timer_test(_) -> _. call_unset_timer_test(C) -> %% steps: - %% 1. init -> [event1], timer 2s + %% 1. init -> state1, timer 2s %% 2. call -> [], unset_timer _ = mock_processor(call_unset_timer_test), Id = gen_id(), @@ -258,16 +241,17 @@ call_unset_timer_test(C) -> 2 = expect_steps_counter(3), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - process_id := Id, status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - } - ] + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 2, + payload := _ + }, + process_id := Id, + current_generation := 2 }} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. @@ -276,10 +260,10 @@ call_unset_timer_test(C) -> postponed_call_test(C) -> %% call between 0 sec timers %% steps: - %% 1. init -> [], timer 0s - %% 2. timeout -> [event1], timer 0s (process duration 3000) - %% 3. call -> [event2], undefined - %% 4. timeout -> [event3], undefined + %% 1. init -> state1, timer 0s + %% 2. timeout -> state2, timer 0s (process duration 3000) + %% 3. call -> state3, undefined + %% 4. timeout -> state4, undefined _ = mock_processor(postponed_call_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), @@ -287,74 +271,44 @@ postponed_call_test(C) -> 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - process_id := Id, status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - }, - #{ - event_id := 3, - metadata := #{<<"format_version">> := 1}, - payload := _Pl3, - timestamp := _Ts3 - } - ] + state := #{ + timestamp := _, + metadata := #{<<"format_version">> := 1}, + process_id := Id, + task_id := _, + generation := 4, + payload := _ + }, + process_id := Id, + current_generation := 4 }} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %% -spec postponed_call_to_suspended_process_test(_) -> _. postponed_call_to_suspended_process_test(C) -> - %% call between 0 sec timers %% steps: - %% 1. init -> [], timer 0s - %% 2. timeout -> [event1], undefined (process duration 3000) - %% 3. call -> [event2], undefined + %% 1. init -> state1, timer 0s + %% 2. timeout -> state2, undefined (process duration 3000) + %% 3. 
call -> state3, undefined _ = mock_processor(postponed_call_to_suspended_process_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), {ok, <<"response">>} = progressor:call(#{ns => ?NS(C), id => Id, args => <<"call_args">>}), 3 = expect_steps_counter(3), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - } - ] - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 3, 3, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %% -spec multiple_calls_test(_) -> _. multiple_calls_test(C) -> - %% call between 0 sec timers %% steps: - %% 1. init -> [], undefined - %% 2. call -> [event1], undefined + %% 1. init -> state1, undefined + %% 2. call -> state2, undefined %% ... - %% 11. call -> [event10], undefined + %% 11. call -> state11, undefined _ = mock_processor(multiple_calls_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), @@ -366,117 +320,71 @@ multiple_calls_test(C) -> ), 11 = expect_steps_counter(33), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [ - #{event_id := 1}, - #{event_id := 2}, - #{event_id := 3}, - #{event_id := 4}, - #{event_id := 5}, - #{event_id := 6}, - #{event_id := 7}, - #{event_id := 8}, - #{event_id := 9}, - #{event_id := 10} - ] - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 11, 11, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. -spec simple_repair_after_non_retriable_error_test(_) -> _. simple_repair_after_non_retriable_error_test(C) -> %% steps: - %% 1. init -> [], timer 0s + %% 1. init -> state1, timer 0s %% 2. timeout -> {error, do_not_retry} - %% 3. timeout(via simple repair call) -> [event1], undefined - %% 4. timeout -> [event2], undefined + %% 3. timeout(via simple repair call) -> state2, undefined + %% 4. timeout -> state3, undefined _ = mock_processor(simple_repair_after_non_retriable_error_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), 2 = expect_steps_counter(2), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - detail := <<"do_not_retry">>, - history := [], + status := <<"error">>, + state := #{generation := 1}, process_id := Id, - status := <<"error">> + current_generation := 1, + detail := <<"do_not_retry">>, + corrupted_by := _ }} = progressor:get(#{ns => ?NS(C), id => Id}), {ok, ok} = progressor:simple_repair(#{ns => ?NS(C), id => Id, context => <<"simple_repair_ctx">>}), 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, - #{ - process_id := Id, - status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - } - ] - } = Process} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 3, 3, <<"running">>) = Process} = progressor:get(#{ns => ?NS(C), id => Id}), false = erlang:is_map_key(detail, Process), + false = erlang:is_map_key(corrupted_by, Process), unmock_processor(), ok. -spec repair_after_non_retriable_error_test(_) -> _. 
repair_after_non_retriable_error_test(C) -> %% steps: - %% 1. init -> [], timer 0s + %% 1. init -> state1, timer 0s %% 2. timeout -> {error, do_not_retry} - %% 3. repair -> [event1], undefined - %% 4. timeout -> [event2], undefined + %% 3. repair -> state2, undefined _ = mock_processor(repair_after_non_retriable_error_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), 2 = expect_steps_counter(2), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - detail := <<"do_not_retry">>, - history := [], + status := <<"error">>, + state := #{generation := 1}, process_id := Id, - status := <<"error">> + current_generation := 1, + detail := <<"do_not_retry">>, + corrupted_by := _ }} = progressor:get(#{ns => ?NS(C), id => Id}), {ok, ok} = progressor:repair(#{ns => ?NS(C), id => Id, args => <<"repair_args">>}), - 4 = expect_steps_counter(4), + 3 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, - #{ - process_id := Id, - status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - } - ] - } = Process} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 2, 2, <<"running">>) = Process} = progressor:get(#{ns => ?NS(C), id => Id}), false = erlang:is_map_key(detail, Process), + false = erlang:is_map_key(corrupted_by, Process), unmock_processor(), ok. %% -spec error_after_max_retries_test(_) -> _. error_after_max_retries_test(C) -> %% steps: - %% 1. init -> [], timer 0s + %% 1. init -> state1, timer 0s %% 2. timeout -> {error, retry_this} %% 3. timeout -> {error, retry_this} %% 4. timeout -> {error, retry_this} @@ -486,10 +394,12 @@ error_after_max_retries_test(C) -> 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - detail := <<"retry_this">>, - history := [], + status := <<"error">>, + state := #{generation := 1}, process_id := Id, - status := <<"error">> + current_generation := 1, + detail := <<"retry_this">>, + corrupted_by := _ }} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. 
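Taken together, the error and repair cases above walk the client-side sequence sketched below; the API calls are the ones used in this suite, and the premise that the processor answers its first timeout with {error, do_not_retry} is an assumption for illustration:

    %% Sketch: drive a process into error status, then repair it.
    repair_flow_demo(Ns) ->
        Id = gen_id(),
        {ok, ok} = progressor:init(#{ns => Ns, id => Id, args => <<"init_args">>}),
        %% assumed: the processor fails its timeout task with {error, do_not_retry},
        %% so the process is marked as corrupted by that task
        {ok, #{status := <<"error">>, detail := _, corrupted_by := _}} =
            progressor:get(#{ns => Ns, id => Id}),
        %% a successful repair clears detail/corrupted_by and resumes the process
        {ok, ok} = progressor:repair(#{ns => Ns, id => Id, args => <<"repair_args">>}),
        {ok, #{status := <<"running">>}} = progressor:get(#{ns => Ns, id => Id}),
        ok.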
@@ -508,11 +418,12 @@ repair_after_call_error_test(C) -> 2 = expect_steps_counter(2), timer:sleep(?AWAIT_TIMEOUT(C)), {ok, #{ - detail := <<"retry_this">>, - metadata := #{<<"k">> := <<"v">>}, - history := [], + status := <<"error">>, + state := #{generation := 1}, process_id := Id, - status := <<"error">> + current_generation := 1, + detail := <<"retry_this">>, + corrupted_by := CorruptionTask }} = progressor:get(#{ns => ?NS(C), id => Id}), {error, <<"repair_error">>} = progressor:repair(#{ ns => ?NS(C), id => Id, args => <<"bad_repair_args">> @@ -521,57 +432,32 @@ timer:sleep(?AWAIT_TIMEOUT(C)), %% should not rewrite detail {ok, #{ - detail := <<"retry_this">>, - metadata := #{<<"k">> := <<"v">>}, - history := [], + status := <<"error">>, + state := #{generation := 1}, process_id := Id, - status := <<"error">> + current_generation := 1, + detail := <<"retry_this">>, + corrupted_by := CorruptionTask }} = progressor:get(#{ns => ?NS(C), id => Id}), {ok, ok} = progressor:repair(#{ns => ?NS(C), id => Id, args => <<"repair_args">>}), 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - metadata := #{<<"k2">> := <<"v2">>}, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - } - ] - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 2, 2, <<"running">>) = Process} = progressor:get(#{ns => ?NS(C), id => Id}), + false = erlang:is_map_key(detail, Process), + false = erlang:is_map_key(corrupted_by, Process), + ?assertEqual(#{<<"k2">> => <<"v2">>}, maps:get(metadata, Process)), unmock_processor(), ok. %% -spec remove_by_timer_test(_) -> _. remove_by_timer_test(C) -> %% steps: - %% 1. init -> [event1, event2], timer 2s + remove + %% 1. init -> state1, timer 2s + remove _ = mock_processor(remove_by_timer_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - }, - #{ - event_id := 2, - metadata := #{<<"format_version">> := 1}, - payload := _Pl2, - timestamp := _Ts2 - } - ] - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 1, 1, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), %% wait task_scan_timeout timer:sleep(4000), {error, <<"process not found">>} = progressor:get(#{ns => ?NS(C), id => Id}), @@ -581,200 +467,18 @@ -spec remove_without_timer_test(_) -> _. remove_without_timer_test(C) -> %% steps: - %% 1. init -> [event1], timer 2s - %% 2. timeout -> [], remove + %% 1. init -> state1, timer 2s + %% 2. timeout -> state2, remove _ = mock_processor(remove_without_timer_test), Id = gen_id(), {ok, ok} = progressor:init(#{ns => ?NS(C), id => Id, args => <<"init_args">>}), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [ - #{ - event_id := 1, - metadata := #{<<"format_version">> := 1}, - payload := _Pl1, - timestamp := _Ts1 - } - ] - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 1, 1, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), 2 = expect_steps_counter(2), timer:sleep(?AWAIT_TIMEOUT(C)), {error, <<"process not found">>} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. 
-%% --spec put_process_test(_) -> _. -put_process_test(C) -> - Id = gen_id(), - Args = #{ - process => #{ - process_id => Id, - status => <<"running">>, - history => [ - event(1), - event(2), - event(3) - ] - } - }, - {ok, ok} = progressor:put(#{ns => ?NS(C), id => Id, args => Args}), - timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [ - #{ - metadata := #{<<"format_version">> := 1}, - process_id := Id, - event_id := 1, - timestamp := _Ts1, - payload := _Pl1 - }, - #{ - timestamp := _Ts2, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - event_id := 2, - payload := _Pl2 - }, - #{ - timestamp := _Ts3, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - event_id := 3, - payload := _Pl3 - } - ] - }} = progressor:get(#{ns => ?NS(C), id => Id}), - - {error, <<"process already exists">>} = progressor:put(#{ns => ?NS(C), id => Id, args => Args}), - ok. -%% --spec put_process_with_timeout_test(_) -> _. -put_process_with_timeout_test(C) -> - %% steps: - %% 1. put -> [event1], timer 1s - %% 2. timeout -> [event2], undefined - _ = mock_processor(put_process_with_timeout_test), - Id = gen_id(), - Args = #{ - process => #{ - process_id => Id, - status => <<"running">>, - history => [event(1)] - }, - action => #{set_timer => erlang:system_time(second) + 1} - }, - {ok, ok} = progressor:put(#{ns => ?NS(C), id => Id, args => Args}), - timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [#{event_id := 1}] - }} = progressor:get(#{ns => ?NS(C), id => Id}), - 1 = expect_steps_counter(1), - timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [#{event_id := 1}, #{event_id := 2}] - }} = progressor:get(#{ns => ?NS(C), id => Id}), - unmock_processor(), - ok. -%% --spec put_process_zombie_test(_) -> _. -put_process_zombie_test(C) -> - %% steps: - %% 1. put -> [event1], timer 1s - %% 2. insert running task from past - %% 3. zombie collecttion - Id = gen_id(), - Args = #{ - process => #{ - process_id => Id, - status => <<"running">>, - history => [event(1)] - } - }, - {ok, ok} = progressor:put(#{ns => ?NS(C), id => Id, args => Args}), - Now = erlang:system_time(second), - ZombieTs = prg_utils:unixtime_to_datetime(Now - 30), - NS = erlang:atom_to_list(?NS(C)), - %% TODO: rework it via storage backend - %% START SQL INJECTION - {ok, _, _, [{TaskId}]} = epg_pool:query( - default_pool, - "INSERT INTO \"" ++ NS ++ - "_tasks\" " - " (process_id, task_type, status, scheduled_time, running_time, args, last_retry_interval, attempts_count)" - " VALUES ($1, $2, $3, $4, $5, $6, $7, $8) RETURNING task_id", - [ - Id, - <<"timeout">>, - <<"running">>, - ZombieTs, - ZombieTs, - <<>>, - 0, - 0 - ] - ), - {ok, 1} = epg_pool:query( - default_pool, - "INSERT INTO \"" ++ NS ++ - "_running\" " - " (task_id, process_id, task_type, status, scheduled_time, running_time, args, " - " last_retry_interval, attempts_count)" - " VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)", - [ - TaskId, - Id, - <<"timeout">>, - <<"running">>, - ZombieTs, - ZombieTs, - <<>>, - 0, - 0 - ] - ), - %% END SQL INJECTION - - %% await zombie collection (process step timeout (10s) + random part (2s)) - timer:sleep(12010), - {ok, #{ - process_id := Id, - status := <<"error">>, - detail := <<"zombie detected">> - }} = progressor:get(#{ns => ?NS(C), id => Id}), - ok. -%% --spec put_process_with_remove_test(_) -> _. -put_process_with_remove_test(C) -> - %% steps: - %% 1. 
put -> [event1], remove 1s - %% 2. remove - Id = gen_id(), - Args = #{ - process => #{ - process_id => Id, - status => <<"running">>, - history => [event(1)] - }, - action => #{set_timer => erlang:system_time(second) + 1, remove => true} - }, - {ok, ok} = progressor:put(#{ns => ?NS(C), id => Id, args => Args}), - timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - process_id := Id, - status := <<"running">>, - history := [#{event_id := 1}] - }} = progressor:get(#{ns => ?NS(C), id => Id}), - timer:sleep(3000), - {error, <<"process not found">>} = progressor:get(#{ns => ?NS(C), id => Id}), - ok. %%%%%%%%%%%%%%%%%%%%% %% Internal functions @@ -782,21 +486,17 @@ put_process_with_remove_test(C) -> mock_processor(simple_timers_test = TestCase) -> Self = self(), - MockProcessor = fun({_Type, _Args, #{history := History} = _Process}, _Opts, _Ctx) -> - case erlang:length(History) of - 0 -> + MockProcessor = fun({_Type, _Args, Process}, _Opts, _Ctx) -> + case Process of + #{current_generation := 2, state := #{generation := 2}} -> Result = #{ - events => [event(1)], - metadata => #{<<"k">> => <<"v">>}, - %% postponed timer - action => #{set_timer => erlang:system_time(second) + 2}, - aux_state => erlang:term_to_binary(<<"aux_state1">>) + state => state() }, - Self ! 1, + Self ! 3, {ok, Result}; - 1 -> + #{current_generation := 1, state := #{generation := 1}} -> Result = #{ - events => [event(2)], + state => state(), %% continuation timer action => #{set_timer => erlang:system_time(second)}, aux_state => erlang:term_to_binary(<<"aux_state2">>) @@ -805,9 +505,13 @@ mock_processor(simple_timers_test = TestCase) -> {ok, Result}; _ -> Result = #{ - events => [] + state => state(), + metadata => #{<<"k">> => <<"v">>}, + %% postponed timer + action => #{set_timer => erlang:system_time(second) + 2}, + aux_state => erlang:term_to_binary(<<"aux_state1">>) }, - Self ! 3, + Self ! 1, {ok, Result} end end, @@ -818,75 +522,68 @@ mock_processor(simple_call_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1)], + state => state(), action => #{set_timer => erlang:system_time(second) + 2} }, Self ! 1, {ok, Result}; ({call, <<"call_args">>, _Process}, _Opts, _Ctx) -> - %% call when process suspended (wait timeout) + %% call when process suspended (deferred timer while call processed) timer:sleep(3000), Result = #{ response => <<"response">>, - events => [event(2)] + state => state() }, Self ! 2, {ok, Result}; - ({timeout, <<>>, #{history := History} = _Process}, _Opts, _Ctx) -> + ({timeout, <<>>, #{current_generation := 2} = _Process}, _Opts, _Ctx) -> %% timeout after call processing - ?assertEqual(2, erlang:length(History)), Result = #{ - events => [event(3)] + state => state() }, Self ! 3, {ok, Result} end, mock_processor(TestCase, MockProcessor); %% -mock_processor(simple_call_with_range_test = TestCase) -> +mock_processor(simple_call_with_generation_test = TestCase) -> Self = self(), MockProcessor = fun - ({init, <<"init_args">>, Process}, _Opts, _Ctx) -> - ?assertEqual(0, maps:get(last_event_id, Process)), + ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1), event(2), event(3), event(4)] + state => state() }, Self ! 
1, {ok, Result}; - ({call, <<"call_args">>, #{history := History} = Process}, _Opts, _Ctx) -> - %% call with range limit=2, offset=1 - ?assertEqual(2, erlang:length(History)), - ?assertEqual(4, maps:get(last_event_id, Process)), - [ - #{event_id := 2}, - #{event_id := 3} - ] = History, + ({call, <<"call_args">>, #{current_generation := 1, state := #{generation := 1}} = _Process}, _Opts, _Ctx) -> + %% call with generation=1 when current_generation=1 Result = #{ response => <<"response">>, - events => [event(5)] + state => state() }, Self ! 2, {ok, Result}; - ({call, <<"call_args_back">>, #{history := History} = Process}, _Opts, _Ctx) -> - %% call with range limit=2, offset=5 direction=backward - ?assertEqual(2, erlang:length(History)), - ?assertEqual(5, maps:get(last_event_id, Process)), - [ - #{event_id := 4}, - #{event_id := 3} - ] = History, + ( + { + call, + <<"call_args_back">>, + #{current_generation := 2, state := #{generation := 1}} = _Process + }, + _Opts, + _Ctx + ) -> + %% call with generation=1 when current_generation=2 Result = #{ response => <<"response">>, - events => [event(6)], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 3, {ok, Result}; - ({timeout, <<>>, #{history := History} = Process}, _Opts, _Ctx) -> - ?assertEqual(6, erlang:length(History)), - ?assertEqual(6, maps:get(last_event_id, Process)), + ({timeout, <<>>, #{current_generation := 3, state := #{generation := 3}} = _Process}, _Opts, _Ctx) -> + %% timeout task executes on last generation Result = #{ - events => [event(7)] + state => state() }, Self ! 4, {ok, Result} @@ -898,25 +595,24 @@ mock_processor(call_replace_timer_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1)], + state => state(), action => #{set_timer => erlang:system_time(second) + 2, remove => true} }, Self ! 1, {ok, Result}; ({call, <<"call_args">>, _Process}, _Opts, _Ctx) -> - %% call when process suspended (wait timeout) + %% call when process suspended Result = #{ response => <<"response">>, - events => [], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 2, {ok, Result}; - ({timeout, <<>>, #{history := History} = _Process}, _Opts, _Ctx) -> + ({timeout, <<>>, #{current_generation := 2} = _Process}, _Opts, _Ctx) -> %% timeout after call processing (remove action was cancelled by call action) - ?assertEqual(1, erlang:length(History)), Result = #{ - events => [event(2)] + state => state() }, Self ! 3, {ok, Result} @@ -928,7 +624,7 @@ mock_processor(call_unset_timer_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1)], + state => state(), action => #{set_timer => erlang:system_time(second) + 2} }, Self ! 1, @@ -937,16 +633,14 @@ mock_processor(call_unset_timer_test = TestCase) -> %% call when process suspended (wait timeout) Result = #{ response => <<"response">>, - events => [], + state => state(), action => unset_timer }, Self ! 2, {ok, Result}; - ({timeout, <<>>, #{history := History} = _Process}, _Opts, _Ctx) -> - %% timeout after call processing (should not work!) - ?assertEqual(2, erlang:length(History)), + ({timeout, <<>>, #{current_generation := 2} = _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(3)] + state => state() }, Self ! 
3, {ok, Result} @@ -958,31 +652,29 @@ mock_processor(postponed_call_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 1, {ok, Result}; - ({timeout, <<>>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({timeout, <<>>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> timer:sleep(3000), Result = #{ - events => [event(1)], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 2, {ok, Result}; - ({call, <<"call_args">>, #{history := History} = _Process}, _Opts, _Ctx) -> - ?assertEqual(1, erlang:length(History)), + ({call, <<"call_args">>, #{current_generation := 2} = _Process}, _Opts, _Ctx) -> Result = #{ response => <<"response">>, - events => [event(2)] + state => state() }, Self ! 3, {ok, Result}; - ({timeout, <<>>, #{history := History} = _Process}, _Opts, _Ctx) -> - ?assertEqual(2, erlang:length(History)), + ({timeout, <<>>, #{current_generation := 3} = _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(3)] + state => state() }, Self ! 4, {ok, Result} @@ -994,23 +686,22 @@ mock_processor(postponed_call_to_suspended_process_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 1, {ok, Result}; - ({timeout, <<>>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({timeout, <<>>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> timer:sleep(3000), Result = #{ - events => [event(1)] + state => state() }, Self ! 2, {ok, Result}; - ({call, <<"call_args">>, #{history := History} = _Process}, _Opts, _Ctx) -> - ?assertEqual(1, erlang:length(History)), + ({call, <<"call_args">>, #{current_generation := 2} = _Process}, _Opts, _Ctx) -> Result = #{ response => <<"response">>, - events => [event(2)] + state => state() }, Self ! 3, {ok, Result} @@ -1022,15 +713,15 @@ mock_processor(multiple_calls_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [] + state => state() }, Self ! 1, {ok, Result}; - ({call, <<N>>, _Process}, _Opts, _Ctx) -> + ({call, <<_N>>, _Process}, _Opts, _Ctx) -> timer:sleep(100), Result = #{ response => <<"response">>, - events => [event(N)] + state => state() }, Self ! iterate, {ok, Result} @@ -1042,26 +733,25 @@ mock_processor(simple_repair_after_non_retriable_error_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 1, {ok, Result}; - ({timeout, <<>>, #{history := []} = _Process}, _Opts, <<>>) -> + ({timeout, <<>>, #{current_generation := 1} = _Process}, _Opts, <<>>) -> Self ! 2, {error, do_not_retry}; - ({timeout, <<>>, #{history := []} = _Process}, _Opts, <<"simple_repair_ctx">>) -> + ({timeout, <<>>, #{current_generation := 1} = _Process}, _Opts, <<"simple_repair_ctx">>) -> %% timeout via simple repair Result = #{ - events => [event(1)], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 3, {ok, Result}; - ({timeout, <<>>, #{history := History} = _Process}, _Opts, _Ctx) -> - ?assertEqual(1, erlang:length(History)), + ({timeout, <<>>, #{current_generation := 2} = _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(2)] + state => state() }, Self ! 
4, {ok, Result} @@ -1073,26 +763,19 @@ mock_processor(repair_after_non_retriable_error_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 1, {ok, Result}; - ({timeout, <<>>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({timeout, <<>>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> Self ! 2, {error, do_not_retry}; - ({repair, <<"repair_args">>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({repair, <<"repair_args">>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1)] + state => state() }, Self ! 3, - {ok, Result}; - ({timeout, <<>>, #{history := History} = _Process}, _Opts, _Ctx) -> - ?assertEqual(1, erlang:length(History)), - Result = #{ - events => [event(2)] - }, - Self ! 4, {ok, Result} end, mock_processor(TestCase, MockProcessor); @@ -1102,12 +785,12 @@ mock_processor(error_after_max_retries_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [], + state => state(), action => #{set_timer => erlang:system_time(second)} }, Self ! 1, {ok, Result}; - ({timeout, <<>>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({timeout, <<>>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> %% must be 3 attempts Self ! iterate, {error, retry_this} @@ -1120,22 +803,22 @@ mock_processor(repair_after_call_error_test = TestCase) -> ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ metadata => #{<<"k">> => <<"v">>}, - events => [] + state => state() }, Self ! 1, {ok, Result}; - ({call, <<"call_args">>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({call, <<"call_args">>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> Self ! 2, %% retriable error for call must be ignored and the process set to error status {error, retry_this}; - ({repair, <<"bad_repair_args">>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({repair, <<"bad_repair_args">>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> %% repair error should not rewrite process detail Self ! 3, {error, <<"repair_error">>}; - ({repair, <<"repair_args">>, #{history := []} = _Process}, _Opts, _Ctx) -> + ({repair, <<"repair_args">>, #{current_generation := 1} = _Process}, _Opts, _Ctx) -> Result = #{ metadata => #{<<"k2">> => <<"v2">>}, - events => [event(1)] + state => state() }, Self ! 4, {ok, Result} @@ -1145,7 +828,7 @@ mock_processor(remove_by_timer_test = TestCase) -> MockProcessor = fun({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1), event(2)], + state => state(), action => #{set_timer => erlang:system_time(second) + 2, remove => true} }, {ok, Result} @@ -1157,28 +840,19 @@ mock_processor(remove_without_timer_test = TestCase) -> MockProcessor = fun ({init, <<"init_args">>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [event(1)], + state => state(), action => #{set_timer => erlang:system_time(second) + 2} }, Self ! 1, {ok, Result}; ({timeout, <<>>, _Process}, _Opts, _Ctx) -> Result = #{ - events => [], + state => state(), action => #{remove => true} }, Self ! 2, {ok, Result} end, - mock_processor(TestCase, MockProcessor); -%% -mock_processor(put_process_with_timeout_test = TestCase) -> - Self = self(), - MockProcessor = fun({timeout, <<>>, _Process}, _Opts, _Ctx) -> - Result = #{events => [event(2)]}, - Self ! 
1, - {ok, Result} - end, mock_processor(TestCase, MockProcessor). mock_processor(_TestCase, MockFun) -> @@ -1210,12 +884,10 @@ expect_steps_counter(ExpectedSteps, CurrentStep) -> CurrentStep end. -event(Id) -> +state() -> #{ - event_id => Id, timestamp => erlang:system_time(second), metadata => #{<<"format_version">> => 1}, - %% msg_pack compatibility for kafka payload => erlang:term_to_binary({bin, crypto:strong_rand_bytes(8)}) }. From 7e5186d8d39c80e6caf47cf038faa24ff83df776 Mon Sep 17 00:00:00 2001 From: ttt161 Date: Thu, 11 Sep 2025 10:25:58 +0300 Subject: [PATCH 3/5] fix CI --- .github/workflows/erlang-checks.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/erlang-checks.yaml b/.github/workflows/erlang-checks.yaml index 7d1512b..23ca27d 100644 --- a/.github/workflows/erlang-checks.yaml +++ b/.github/workflows/erlang-checks.yaml @@ -15,7 +15,6 @@ jobs: outputs: otp-version: ${{ steps.otp-version.outputs.version }} rebar-version: ${{ steps.rebar-version.outputs.version }} - thrift-version: ${{ steps.thrift-version.outputs.version }} steps: - name: Checkout repository uses: actions/checkout@v4 @@ -24,8 +23,6 @@ jobs: run: echo "::set-output name=version::$OTP_VERSION" - id: rebar-version run: echo "::set-output name=version::$REBAR_VERSION" - - id: thrift-version - run: echo "::set-output name=version::$THRIFT_VERSION" run: name: Run checks From ff5c5dc1c129a9bb8d8fd97b162030bc48ffc3ad Mon Sep 17 00:00:00 2001 From: ttt161 Date: Thu, 11 Sep 2025 10:29:41 +0300 Subject: [PATCH 4/5] fix CI --- .github/workflows/erlang-checks.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/erlang-checks.yaml b/.github/workflows/erlang-checks.yaml index 23ca27d..531f4d6 100644 --- a/.github/workflows/erlang-checks.yaml +++ b/.github/workflows/erlang-checks.yaml @@ -31,8 +31,7 @@ jobs: with: otp-version: ${{ needs.setup.outputs.otp-version }} rebar-version: ${{ needs.setup.outputs.rebar-version }} - use-thrift: true - thrift-version: ${{ needs.setup.outputs.thrift-version }} + use-thrift: false run-ct-with-compose: true use-coveralls: true upload-coverage: false From d3bd297424f2edbf48a0174751bb7fb664d12b0b Mon Sep 17 00:00:00 2001 From: ttt161 Date: Thu, 11 Sep 2025 11:16:19 +0300 Subject: [PATCH 5/5] cleanup tests --- test/prg_base_SUITE.erl | 101 ++++------------------------------------ 1 file changed, 8 insertions(+), 93 deletions(-) diff --git a/test/prg_base_SUITE.erl b/test/prg_base_SUITE.erl index cbb1eaf..8102593 100644 --- a/test/prg_base_SUITE.erl +++ b/test/prg_base_SUITE.erl @@ -100,21 +100,8 @@ simple_timers_test(C) -> 3 = expect_steps_counter(3), ExpectedAux = erlang:term_to_binary(<<"aux_state2">>), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 3, - payload := _ - }, - metadata := #{<<"k">> := <<"v">>}, - process_id := Id, - aux_state := ExpectedAux, - current_generation := 3 - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 3, 3, <<"running">>) = Process} = progressor:get(#{ns => ?NS(C), id => Id}), + ?assertEqual(ExpectedAux, maps:get(aux_state, Process)), unmock_processor(), ok. 
%% @@ -130,19 +117,7 @@ simple_call_test(C) -> {ok, <<"response">>} = progressor:call(#{ns => ?NS(C), id => Id, args => <<"call_args">>}), 3 = expect_steps_counter(3), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 3, - payload := _ - }, - process_id := Id, - current_generation := 3 - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 3, 3, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %% @@ -170,32 +145,8 @@ simple_call_with_generation_test(C) -> }), 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 4, - payload := _ - }, - process_id := Id, - current_generation := 4 - }} = progressor:get(#{ns => ?NS(C), id => Id}), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 2, - payload := _ - }, - process_id := Id, - current_generation := 4 - }} = progressor:get(#{ns => ?NS(C), id => Id, generation => 2}), + {ok, ?PROCESS_EXPECTED(Id, 4, 4, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 2, 4, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id, generation => 2}), unmock_processor(), ok. %% @@ -212,19 +163,7 @@ call_replace_timer_test(C) -> 3 = expect_steps_counter(3), %% wait task_scan_timeout, maybe remove works timer:sleep(4000), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 3, - payload := _ - }, - process_id := Id, - current_generation := 3 - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 3, 3, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %% @@ -240,19 +179,7 @@ call_unset_timer_test(C) -> %% wait 3 steps but got 2 - good! 2 = expect_steps_counter(3), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 2, - payload := _ - }, - process_id := Id, - current_generation := 2 - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 2, 2, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %% @@ -270,19 +197,7 @@ postponed_call_test(C) -> {ok, <<"response">>} = progressor:call(#{ns => ?NS(C), id => Id, args => <<"call_args">>}), 4 = expect_steps_counter(4), timer:sleep(?AWAIT_TIMEOUT(C)), - {ok, #{ - status := <<"running">>, - state := #{ - timestamp := _, - metadata := #{<<"format_version">> := 1}, - process_id := Id, - task_id := _, - generation := 4, - payload := _ - }, - process_id := Id, - current_generation := 4 - }} = progressor:get(#{ns => ?NS(C), id => Id}), + {ok, ?PROCESS_EXPECTED(Id, 4, 4, <<"running">>)} = progressor:get(#{ns => ?NS(C), id => Id}), unmock_processor(), ok. %%
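For reference, the generation-aware client flow exercised throughout this suite, condensed into one hedged sketch; the request shapes match the tests above, and the concrete generation numbers assume one new state per processor step:

    %% Sketch: pin a call and a read to an earlier generation.
    generation_flow_demo(Ns) ->
        Id = gen_id(),
        {ok, ok} = progressor:init(#{ns => Ns, id => Id, args => <<"init_args">>}),
        %% run the call against the state as of generation 1
        {ok, _Resp} = progressor:call(#{ns => Ns, id => Id, args => <<"call_args">>, generation => 1}),
        %% a default read returns the latest state alongside current_generation
        {ok, #{current_generation := Current, state := #{generation := Current}}} =
            progressor:get(#{ns => Ns, id => Id}),
        %% a pinned read returns a historical state; current_generation is unchanged
        {ok, #{current_generation := Current, state := #{generation := 1}}} =
            progressor:get(#{ns => Ns, id => Id, generation => 1}),
        ok.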