Merge pull request #13 from aeternity/GH12-check-return-values

Check return values on update operations.
This commit is contained in:
Ulf Wiger 2019-11-04 13:54:11 +01:00 committed by GitHub
commit 49ff8bcca0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 435 additions and 113 deletions

View File

@ -12,9 +12,13 @@ docs:
check: check:
$(REBAR3) dialyzer $(REBAR3) dialyzer
test: eunit:
$(REBAR3) eunit $(suite) $(REBAR3) eunit $(suite)
ct:
$(REBAR3) ct $(suite)
test: eunit ct
conf_clean: conf_clean:
@: @:

View File

@ -33,6 +33,68 @@ This means that a prefix key identifies the start of the sequence of
entries whose keys match the prefix. The backend uses this to optimize entries whose keys match the prefix. The backend uses this to optimize
selects on prefix keys. selects on prefix keys.
## Customization
RocksDB supports a number of customization options. These can be specified
by providing a `{Key, Value}` list named `rocksdb_opts` under `user_properties`,
for example:
```erlang
mnesia:create_table(foo, [{rocksdb_copies, [node()]},
...
{user_properties,
[{rocksdb_opts, [{max_open_files, 1024}]}]
}])
```
Consult the [RocksDB documentation](https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning)
for information on configuration parameters. Also see the section below on handling write errors.
The default configuration for tables in `mnesia_rocksdb` is:
```
default_open_opts() ->
[ {create_if_missing, true}
, {cache_size,
list_to_integer(get_env_default("ROCKSDB_CACHE_SIZE", "32212254"))}
, {block_size, 1024}
, {max_open_files, 100}
, {write_buffer_size,
list_to_integer(get_env_default(
"ROCKSDB_WRITE_BUFFER_SIZE", "4194304"))}
, {compression,
list_to_atom(get_env_default("ROCKSDB_COMPRESSION", "true"))}
, {use_bloomfilter, true}
].
```
It is also possible, for larger databases, to produce a tuning parameter file.
This is experimental, and mostly copied from `mnesia_leveldb`. Consult the
source code in `mnesia_rocksdb_tuning.erl` and `mnesia_rocksdb_params.erl`.
Contributions are welcome.
## Handling of errors in write operations
The RocksDB update operations return either `ok` or `{error, any()}`.
Since the actual updates are performed after the 'point-of-no-return',
returning an `error` result will cause mnesia to behave unpredictably, since
the operations are expected to simply work.
An `on_write_error` option can be provided, per-table, in the `rocksdb_opts`
user property (see [Customization](#customization) above). Supported values indicate at which level an error
indication should be reported. Mnesia may save reported events in RAM, and may
also print them, depending on the debug level (controlled with `mnesia:set_debug_level/1`).
Mnesia debug levels are, in increasing detail, `none | verbose | debug | trace`.
The supported values for `on_write_error` are:
| Value | Saved at debug level | Printed at debug level | Action |
| ------- | -------------------- | ---------------------- | --------- |
| debug | unless none | verbose, debug, trace | ignore |
| verbose | unless none | verbose, debug, trace | ignore |
| warning | always | always | ignore |
| error | always | always | exception |
| fatal | always | always | core dump |
## Caveats ## Caveats
Avoid placing `bag` tables in RocksDB. Although they work, each write Avoid placing `bag` tables in RocksDB. Although they work, each write

View File

@ -9,6 +9,7 @@
[ [
{test, {test,
[ [
{deps, [{proper, "1.2.0"}]} {deps, [ {proper, "1.2.0"}
, {meck, "0.8.13"}]}
]} ]}
]}. ]}.

View File

@ -165,24 +165,28 @@
%% RECORDS %% RECORDS
%% ---------------------------------------------------------------------------- %% ----------------------------------------------------------------------------
-record(sel, {alias, % TODO: not used -record(sel, { alias % TODO: not used
tab, , tab
ref, , ref
keypat, , keypat
ms, % TODO: not used , ms % TODO: not used
compiled_ms, , compiled_ms
limit, , limit
key_only = false, % TODO: not used , key_only = false % TODO: not used
direction = forward}). % TODO: not used , direction = forward}). % TODO: not used
-type on_write_error() :: debug | verbose | warning | error | fatal.
-define(WRITE_ERR_DEFAULT, verbose).
-record(st, { ets -record(st, { ets
, ref , ref
, alias , alias
, tab , tab
, type , type
, size_warnings % integer() , size_warnings % integer()
, maintain_size % boolean() , maintain_size % boolean()
}). , on_write_error = ?WRITE_ERR_DEFAULT :: on_write_error()
}).
%% ---------------------------------------------------------------------------- %% ----------------------------------------------------------------------------
%% CONVENIENCE API %% CONVENIENCE API
@ -338,21 +342,32 @@ prefixes(_) ->
%% set is OK as ordered_set is a kind of set. %% set is OK as ordered_set is a kind of set.
check_definition(Alias, Tab, Nodes, Props) -> check_definition(Alias, Tab, Nodes, Props) ->
Id = {Alias, Nodes}, Id = {Alias, Nodes},
Props1 = lists:map( try lists:map(
fun({type, T} = P) -> fun({type, T} = P) ->
if T==set; T==ordered_set; T==bag -> if T==set; T==ordered_set; T==bag ->
P; P;
true -> true ->
mnesia:abort({combine_error, mnesia:abort({combine_error,
Tab, Tab,
[Id, {type,T}]}) [Id, {type,T}]})
end; end;
({user_properties, _} = P) -> ({user_properties, UPs} = P) ->
%% should perhaps verify rocksdb options... RdbOpts = proplists:get_value(rocksdb_opts, UPs, []),
P; OWE = proplists:get_value(on_write_error, RdbOpts, ?WRITE_ERR_DEFAULT),
(P) -> P case valid_mnesia_op(OWE) of
end, Props), true ->
{ok, Props1}. P;
false ->
throw({error, {invalid, {on_write_error, OWE}}})
end;
(P) -> P
end, Props) of
Props1 ->
{ok, Props1}
catch
throw:Error ->
Error
end.
%% -> ok | {error, exists} %% -> ok | {error, exists}
create_table(_Alias, Tab, _Props) -> create_table(_Alias, Tab, _Props) ->
@ -400,7 +415,9 @@ load_table_(Alias, Tab, Type, RdbOpts) ->
%% transform_table on a rocksdb_table that has indexing. %% transform_table on a rocksdb_table that has indexing.
?dbg("ERR: table:~p already loaded pid:~p stack:~p~n", ?dbg("ERR: table:~p already loaded pid:~p stack:~p~n",
[Tab, _Pid, _Stack]), [Tab, _Pid, _Stack]),
ok ok;
{error, Other} ->
mnesia:abort(Other)
end. end.
close_table(Alias, Tab) -> close_table(Alias, Tab) ->
@ -798,26 +815,23 @@ slot_iter_set(Res, _, _, _) when element(1, Res) =/= ok ->
'$end_of_table'. '$end_of_table'.
update_counter(Alias, Tab, C, Val) when is_integer(Val) -> update_counter(Alias, Tab, C, Val) when is_integer(Val) ->
case call(Alias, Tab, {update_counter, C, Val}) of call(Alias, Tab, {update_counter, C, Val}).
badarg ->
mnesia:abort(badarg);
Res ->
Res
end.
%% server-side part %% server-side part
do_update_counter(C, Val, Ref) -> do_update_counter(C, Val, Ref, St) ->
Enc = encode_key(C), Enc = encode_key(C),
case ?rocksdb:get(Ref, Enc, [{fill_cache, true}]) of case ?rocksdb:get(Ref, Enc, [{fill_cache, true}]) of
{ok, EncVal} -> {ok, EncVal} ->
case decode_val(EncVal) of case decode_val(EncVal) of
{_, _, Old} = Rec when is_integer(Old) -> {_, _, Old} = Rec when is_integer(Old) ->
Res = Old+Val, Res = Old+Val,
?rocksdb:put(Ref, Enc, return_catch(
encode_val( fun() ->
setelement(3, Rec, Res)), db_put(Ref, Enc,
[]), encode_val(
Res; setelement(3, Rec, Res)),
[], St)
end);
_ -> _ ->
badarg badarg
end; end;
@ -877,16 +891,22 @@ start_proc(Alias, Tab, Type, RdbOpts) ->
init({Alias, Tab, Type, RdbOpts}) -> init({Alias, Tab, Type, RdbOpts}) ->
process_flag(trap_exit, true), process_flag(trap_exit, true),
{ok, Ref, Ets} = do_load_table(Tab, RdbOpts), try
St = #st{ ets = Ets {ok, Ref, Ets} = do_load_table(Tab, RdbOpts),
, ref = Ref St = #st{ ets = Ets
, alias = Alias , ref = Ref
, tab = Tab , alias = Alias
, type = Type , tab = Tab
, size_warnings = 0 , type = Type
, maintain_size = should_maintain_size(Tab) , size_warnings = 0
}, , maintain_size = should_maintain_size(Tab)
{ok, recover_size_info(St)}. },
OnWriteError = proplists:get_value(on_write_error, RdbOpts, St#st.on_write_error),
{ok, recover_size_info(St#st{on_write_error = OnWriteError})}
catch
throw:badarg ->
{error, write_error}
end.
do_load_table(Tab, RdbOpts) -> do_load_table(Tab, RdbOpts) ->
MPd = data_mountpoint(Tab), MPd = data_mountpoint(Tab),
@ -906,13 +926,13 @@ handle_call({write_info, Key, Value}, _From, #st{} = St) ->
_ = write_info_(Key, Value, St), _ = write_info_(Key, Value, St),
{reply, ok, St}; {reply, ok, St};
handle_call({update_counter, C, Incr}, _From, #st{ref = Ref} = St) -> handle_call({update_counter, C, Incr}, _From, #st{ref = Ref} = St) ->
{reply, do_update_counter(C, Incr, Ref), St}; {reply, do_update_counter(C, Incr, Ref, St), St};
handle_call({insert, Key, Val}, _From, St) -> handle_call({insert, Key, Val}, _From, St) ->
do_insert(Key, Val, St), Res = do_insert(Key, Val, St),
{reply, ok, St}; {reply, Res, St};
handle_call({delete, Key}, _From, St) -> handle_call({delete, Key}, _From, St) ->
do_delete(Key, St), Res = do_delete(Key, St),
{reply, ok, St}; {reply, Res, St};
handle_call(clear_table, _From, #st{ets = Ets, tab = Tab, ref = Ref} = St) -> handle_call(clear_table, _From, #st{ets = Ets, tab = Tab, ref = Ref} = St) ->
MPd = data_mountpoint(Tab), MPd = data_mountpoint(Tab),
?dbg("Attempting clear_table(~p)~n", [Tab]), ?dbg("Attempting clear_table(~p)~n", [Tab]),
@ -1162,16 +1182,19 @@ size_warning(Alias, Tab) ->
gen_server:cast(ProcName, size_warning). gen_server:cast(ProcName, size_warning).
%% server-side end of insert/3. %% server-side end of insert/3.
do_insert(K, V, #st{ref = Ref, type = bag, maintain_size = false}) -> do_insert(K, V, #st{ref = Ref, type = bag, maintain_size = false} = St) ->
do_insert_bag(Ref, K, V, false); return_catch(fun() -> do_insert_bag(Ref, K, V, false, St) end);
do_insert(K, V, #st{ets = Ets, ref = Ref, type = bag, maintain_size = true}) -> do_insert(K, V, #st{ets = Ets, ref = Ref, type = bag, maintain_size = true} = St) ->
CurSz = read_info(size, 0, Ets), return_catch(
NewSz = do_insert_bag(Ref, K, V, CurSz), fun() ->
ets_insert_info(Ets, size, NewSz), CurSz = read_info(size, 0, Ets),
ok; NewSz = do_insert_bag(Ref, K, V, CurSz, St),
do_insert(K, V, #st{ref = Ref, maintain_size = false}) -> ets_insert_info(Ets, size, NewSz),
?rocksdb:put(Ref, K, V, []); ok
do_insert(K, V, #st{ets = Ets, ref = Ref, maintain_size = true}) -> end);
do_insert(K, V, #st{ref = Ref, maintain_size = false} = St) ->
return_catch(fun() -> db_put(Ref, K, V, [], St) end);
do_insert(K, V, #st{ets = Ets, ref = Ref, maintain_size = true} = St) ->
IsNew = IsNew =
case ?rocksdb:get(Ref, K, []) of case ?rocksdb:get(Ref, K, []) of
{ok, _} -> {ok, _} ->
@ -1181,70 +1204,81 @@ do_insert(K, V, #st{ets = Ets, ref = Ref, maintain_size = true}) ->
end, end,
case IsNew of case IsNew of
true -> true ->
NewSz = read_info(size, 0, Ets) + 1, return_catch(
{Ki, Vi} = info_obj(size, NewSz), fun() ->
?rocksdb:write(Ref, [{put, Ki, Vi}, {put, K, V}], []), NewSz = read_info(size, 0, Ets) + 1,
ets_insert_info(Ets, size, NewSz); {Ki, Vi} = info_obj(size, NewSz),
L = [{put, Ki, Vi}, {put, K, V}],
write_result(mnesia_rocksdb_lib:write(Ref, L, []),
write, [Ref, L, []], St), % may throw
ets_insert_info(Ets, size, NewSz)
end);
false -> false ->
?rocksdb:put(Ref, K, V, []) return_catch(fun() -> db_put(Ref, K, V, [], St) end)
end, end,
ok. ok.
do_insert_bag(Ref, K, V, CurSz) -> do_insert_bag(Ref, K, V, CurSz, St) ->
KSz = byte_size(K), KSz = byte_size(K),
with_iterator( with_iterator(
Ref, fun(I) -> Ref, fun(I) ->
do_insert_bag_( do_insert_bag_(
KSz, K, ?rocksdb:iterator_move(I, K), I, V, 0, Ref, CurSz) KSz, K, ?rocksdb:iterator_move(I, K), I, V, 0, Ref, CurSz, St)
end). end).
%% There's a potential access pattern that would force counters to %% There's a potential access pattern that would force counters to
%% creep upwards and eventually hit the limit. This could be addressed, %% creep upwards and eventually hit the limit. This could be addressed,
%% with compaction. TODO. %% with compaction. TODO.
do_insert_bag_(Sz, K, Res, I, V, Prev, Ref, TSz) when Prev < ?MAX_BAG -> do_insert_bag_(Sz, K, Res, I, V, Prev, Ref, TSz, St) when Prev < ?MAX_BAG ->
case Res of case Res of
{ok, <<K:Sz/binary, _:?BAG_CNT>>, V} -> {ok, <<K:Sz/binary, _:?BAG_CNT>>, V} ->
%% object exists %% object exists
TSz; TSz;
{ok, <<K:Sz/binary, N:?BAG_CNT>>, _} -> {ok, <<K:Sz/binary, N:?BAG_CNT>>, _} ->
do_insert_bag_( do_insert_bag_(
Sz, K, ?rocksdb:iterator_move(I, next), I, V, N, Ref, TSz); Sz, K, ?rocksdb:iterator_move(I, next), I, V, N, Ref, TSz, St);
_ when TSz =:= false -> _ when TSz =:= false ->
Key = <<K/binary, (Prev+1):?BAG_CNT>>, Key = <<K/binary, (Prev+1):?BAG_CNT>>,
?rocksdb:put(Ref, Key, V, []); db_put(Ref, Key, V, [], St);
_ -> _ ->
NewSz = TSz + 1, NewSz = TSz + 1,
{Ki, Vi} = info_obj(size, NewSz), {Ki, Vi} = info_obj(size, NewSz),
Key = <<K/binary, (Prev+1):?BAG_CNT>>, Key = <<K/binary, (Prev+1):?BAG_CNT>>,
?rocksdb:write(Ref, [{put, Ki, Vi}, {put, Key, V}], []), db_write(Ref, [{put, Ki, Vi}, {put, Key, V}], [], St),
NewSz NewSz
end. end.
%% server-side part %% server-side part
do_delete(Key, #st{ref = Ref, type = bag, maintain_size = false}) -> do_delete(Key, #st{ref = Ref, type = bag, maintain_size = false} = St) ->
do_delete_bag(byte_size(Key), Key, Ref, false); return_catch(fun() -> do_delete_bag(byte_size(Key), Key, Ref, false, St) end);
do_delete(Key, #st{ets = Ets, ref = Ref, type = bag, maintain_size = true}) -> do_delete(Key, #st{ets = Ets, ref = Ref, type = bag, maintain_size = true} = St) ->
Sz = byte_size(Key), return_catch(
CurSz = read_info(size, 0, Ets), fun() ->
NewSz = do_delete_bag(Sz, Key, Ref, CurSz), Sz = byte_size(Key),
ets_insert_info(Ets, size, NewSz), CurSz = read_info(size, 0, Ets),
ok; NewSz = do_delete_bag(Sz, Key, Ref, CurSz, St),
do_delete(Key, #st{ref = Ref, maintain_size = false}) -> ets_insert_info(Ets, size, NewSz),
?rocksdb:delete(Ref, Key, []); ok
do_delete(Key, #st{ets = Ets, ref = Ref, maintain_size = true}) -> end);
do_delete(Key, #st{ref = Ref, maintain_size = false} = St) ->
return_catch(fun() -> db_delete(Ref, Key, [], St) end);
do_delete(Key, #st{ets = Ets, ref = Ref, maintain_size = true} = St) ->
CurSz = read_info(size, 0, Ets), CurSz = read_info(size, 0, Ets),
case ?rocksdb:get(Ref, Key, [{fill_cache,true}]) of case ?rocksdb:get(Ref, Key, [{fill_cache,true}]) of
{ok, _} -> {ok, _} ->
NewSz = CurSz -1, return_catch(
{Ki, Vi} = info_obj(size, NewSz), fun() ->
ok = ?rocksdb:write(Ref, [{delete, Key}, {put, Ki, Vi}], []), NewSz = CurSz -1,
ets_insert_info(Ets, size, NewSz); {Ki, Vi} = info_obj(size, NewSz),
ok = db_write(Ref, [{delete, Key}, {put, Ki, Vi}], [], St),
ets_insert_info(Ets, size, NewSz)
end);
not_found -> not_found ->
false false
end. end.
do_delete_bag(Sz, Key, Ref, TSz) -> do_delete_bag(Sz, Key, Ref, TSz, St) ->
Found = Found =
with_iterator( with_iterator(
Ref, fun(I) -> Ref, fun(I) ->
@ -1255,13 +1289,13 @@ do_delete_bag(Sz, Key, Ref, TSz) ->
{[], _} -> {[], _} ->
TSz; TSz;
{_, false} -> {_, false} ->
?rocksdb:write(Ref, [{delete, K} || K <- Found], []); db_write(Ref, [{delete, K} || K <- Found], [], St);
{_, _} -> {_, _} ->
N = length(Found), N = length(Found),
NewSz = TSz - N, NewSz = TSz - N,
{Ki, Vi} = info_obj(size, NewSz), {Ki, Vi} = info_obj(size, NewSz),
?rocksdb:write(Ref, [{put, Ki, Vi} | db_write(Ref, [{put, Ki, Vi} |
[{delete, K} || K <- Found]], []), [{delete, K} || K <- Found]], [], St),
NewSz NewSz
end. end.
@ -1279,21 +1313,21 @@ do_delete_bag_(Sz, K, Res, Ref, I) ->
end. end.
do_match_delete(Pat, #st{ets = Ets, ref = Ref, tab = Tab, type = Type, do_match_delete(Pat, #st{ets = Ets, ref = Ref, tab = Tab, type = Type,
maintain_size = MaintainSize}) -> maintain_size = MaintainSize} = St) ->
Fun = fun(_, Key, Acc) -> [Key|Acc] end, Fun = fun(_, Key, Acc) -> [Key|Acc] end,
Keys = do_fold(Ref, Tab, Type, Fun, [], [{Pat,[],['$_']}], 30), Keys = do_fold(Ref, Tab, Type, Fun, [], [{Pat,[],['$_']}], 30),
case {Keys, MaintainSize} of case {Keys, MaintainSize} of
{[], _} -> {[], _} ->
ok; ok;
{_, false} -> {_, false} ->
?rocksdb:write(Ref, [{delete, K} || K <- Keys], []), db_write(Ref, [{delete, K} || K <- Keys], [], St),
ok; ok;
{_, true} -> {_, true} ->
CurSz = read_info(size, 0, Ets), CurSz = read_info(size, 0, Ets),
NewSz = max(CurSz - length(Keys), 0), NewSz = max(CurSz - length(Keys), 0),
{Ki, Vi} = info_obj(size, NewSz), {Ki, Vi} = info_obj(size, NewSz),
?rocksdb:write(Ref, [{put, Ki, Vi} | db_write(Ref, [{put, Ki, Vi} |
[{delete, K} || K <- Keys]], []), [{delete, K} || K <- Keys]], [], St),
ets_insert_info(Ets, size, NewSz), ets_insert_info(Ets, size, NewSz),
ok ok
end. end.
@ -1339,8 +1373,8 @@ property(Tab, Prop, Default) ->
exit:_ -> Default exit:_ -> Default
end. end.
write_info_(Item, Val, #st{ets = Ets, ref = Ref}) -> write_info_(Item, Val, #st{ets = Ets, ref = Ref} = St) ->
rocksdb_insert_info(Ref, Item, Val), rocksdb_insert_info(Ref, Item, Val, St),
ets_insert_info(Ets, Item, Val). ets_insert_info(Ets, Item, Val).
ets_insert_info(Ets, Item, Val) -> ets_insert_info(Ets, Item, Val) ->
@ -1349,14 +1383,14 @@ ets_insert_info(Ets, Item, Val) ->
ets_delete_info(Ets, Item) -> ets_delete_info(Ets, Item) ->
ets:delete(Ets, {info, Item}). ets:delete(Ets, {info, Item}).
rocksdb_insert_info(Ref, Item, Val) -> rocksdb_insert_info(Ref, Item, Val, St) ->
EncKey = info_key(Item), EncKey = info_key(Item),
EncVal = encode_val(Val), EncVal = encode_val(Val),
?rocksdb:put(Ref, EncKey, EncVal, []). db_put(Ref, EncKey, EncVal, [], St).
rocksdb_delete_info(Ref, Item) -> rocksdb_delete_info(Ref, Item, St) ->
EncKey = info_key(Item), EncKey = info_key(Item),
?rocksdb:delete(Ref, EncKey, []). db_delete(Ref, EncKey, [], St).
info_obj(Item, Val) -> info_obj(Item, Val) ->
{info_key(Item), encode_val(Val)}. {info_key(Item), encode_val(Val)}.
@ -1364,8 +1398,8 @@ info_obj(Item, Val) ->
info_key(Item) -> info_key(Item) ->
<<?INFO_TAG, (encode_key(Item))/binary>>. <<?INFO_TAG, (encode_key(Item))/binary>>.
delete_info_(Item, #st{ets = Ets, ref = Ref}) -> delete_info_(Item, #st{ets = Ets, ref = Ref} = St) ->
rocksdb_delete_info(Ref, Item), rocksdb_delete_info(Ref, Item, St),
ets_delete_info(Ets, Item). ets_delete_info(Ets, Item).
read_info(Item, Default, Ets) -> read_info(Item, Default, Ets) ->
@ -1555,6 +1589,58 @@ keypat_pfx({HeadPat,_Gs,_}, KeyPos) when is_tuple(HeadPat) ->
keypat_pfx(_, _) -> keypat_pfx(_, _) ->
<<>>. <<>>.
%% ----------------------------------------------------------------------------
%% Db wrappers
%% ----------------------------------------------------------------------------
return_catch(F) when is_function(F, 0) ->
try F()
catch
throw:badarg ->
badarg
end.
db_put(Ref, K, V, Opts, St) ->
write_result(mnesia_rocksdb_lib:put(Ref, K, V, Opts), put, [Ref, K, V, Opts], St).
db_write(Ref, List, Opts, St) ->
write_result(mnesia_rocksdb_lib:write(Ref, List, Opts), write, [Ref, List, Opts], St).
db_delete(Ref, K, Opts, St) ->
write_result(mnesia_rocksdb_lib:delete(Ref, K, Opts), delete, [Ref, K, Opts], St).
write_result(ok, _, _, _) ->
ok;
write_result(Res, Op, Args, #st{on_write_error = Rpt}) ->
RptOp = rpt_op(Rpt),
mnesia_lib:RptOp("FAILED rocksdb:~p(" ++ rpt_fmt(Args) ++ ") -> ~p~n",
[Op | Args] ++ [Res]),
if Rpt == fatal; Rpt == error ->
throw(badarg);
true ->
ok
end.
rpt_fmt([_|T]) ->
lists:append(["~p" | [", ~p" || _ <- T]]).
rpt_op(debug) ->
dbg_out;
rpt_op(Op) ->
Op.
valid_mnesia_op(Op) ->
if Op==debug
; Op==verbose
; Op==warning
; Op==error
; Op==fatal ->
true;
true ->
false
end.
%% ---------------------------------------------------------------------------- %% ----------------------------------------------------------------------------
%% COMMON PRIVATE %% COMMON PRIVATE
%% ---------------------------------------------------------------------------- %% ----------------------------------------------------------------------------

View File

@ -0,0 +1,17 @@
%%% @doc RocksDB update wrappers, in separate module for easy tracing and mocking.
%%%
%%% These 1:1 delegations to the rocksdb NIF module are deliberate
%%% (normally re-wrapping a library is an anti-pattern): the test suite
%%% replaces this module with meck to inject write errors, and call
%%% tracing can target this small module instead of the NIF itself.
-module(mnesia_rocksdb_lib).

-export([put/4,
         write/3,
         delete/3]).

%% @doc Store a single key/value pair. Returns ok | {error, Reason}.
-spec put(term(), binary(), binary(), list()) -> ok | {error, term()}.
put(Ref, K, V, Opts) ->
    rocksdb:put(Ref, K, V, Opts).

%% @doc Apply a list of {put, K, V} / {delete, K} operations as one
%% write batch. Returns ok | {error, Reason}.
-spec write(term(), list(), list()) -> ok | {error, term()}.
write(Ref, L, Opts) ->
    rocksdb:write(Ref, L, Opts).

%% @doc Delete a single key. Returns ok | {error, Reason}.
-spec delete(term(), binary(), list()) -> ok | {error, term()}.
delete(Ref, K, Opts) ->
    rocksdb:delete(Ref, K, Opts).

View File

@ -0,0 +1,49 @@
%%% @doc Common Test suite for mnesia_rocksdb.
%%% Thin CT wrapper: the single test case delegates to
%%% mnesia_rocksdb_error_handling:run/0, which exercises write-error
%%% reporting across table types, operations and report levels.
-module(mnesia_rocksdb_SUITE).

%% CT callbacks
-export([
          all/0
        , groups/0
        , suite/0
        , init_per_suite/1
        , end_per_suite/1
        , init_per_group/2
        , end_per_group/2
        , init_per_testcase/2
        , end_per_testcase/2
        ]).

%% Test cases
-export([error_handling/1]).

-include_lib("common_test/include/ct.hrl").

%% No suite-wide CT options.
suite() ->
    [].

%% Run the single group below.
suite() -> [].
all() ->
    [{group, all_tests}].

%% 'sequence': cases run in order and the group stops on first failure.
groups() ->
    [{all_tests, [sequence], [error_handling]}].

%% Delegates all error-injection scenarios to the helper module.
error_handling(_Config) ->
    mnesia_rocksdb_error_handling:run().

%% The remaining callbacks are pass-throughs; all setup is done by the
%% helper module itself (it restarts mnesia per scenario).
init_per_suite(Config) ->
    Config.

end_per_suite(_Config) ->
    ok.

init_per_group(_, Config) ->
    Config.

end_per_group(_, _Config) ->
    ok.

init_per_testcase(_, Config) ->
    Config.

end_per_testcase(_, _Config) ->
    ok.

View File

@ -0,0 +1,103 @@
%%% @doc Test helper exercising the per-table on_write_error levels of
%%% mnesia_rocksdb: it injects write failures by mocking
%%% mnesia_rocksdb_lib and checks the mnesia system events produced.
-module(mnesia_rocksdb_error_handling).

-export([run/0,
         run/4]).
%% Entry point (called from the CT suite). Runs every combination of
%% size maintenance, table type, write operation and report level, and
%% returns the list of per-scenario results.
run() ->
    setup(),
    %% run only one test for 'fatal', to save time.
    [run(Type, Op, L, MaintainSz) || MaintainSz <- [false, true],
                                     Type <- [set, bag],
                                     Op <- [insert, update, delete],
                                     L <- levels()]
        ++ [run(set, insert, fatal, false)].
%% Run one scenario: fresh mnesia, a dedicated table configured with
%% on_write_error = Level, one pre-existing record, then perform Op
%% under the error-injecting mock and wait for the expected report.
run(Type, Op, Level, MaintainSz) ->
    setup(),
    {ok, Tab} = create_tab(Type, Level, MaintainSz),
    mnesia:dirty_write({Tab, a, 1}), % pre-existing data
    with_mock(Level, Op, Tab, fun() ->
                                      try_write(Op, Type, Tab),
                                      expect_error(Level, Tab)
                              end).
%% on_write_error levels covered by the full matrix. 'fatal' is left
%% out here; run/0 adds a single fatal case separately, to save time.
levels() ->
    [debug, verbose, warning, error].
%% Restart mnesia from scratch so every scenario starts from a clean,
%% freshly reset schema (see start_mnesia/0).
setup() ->
    mnesia:stop(),
    start_mnesia().
%% Create the rocksdb-backed table for one scenario.
%% Fix: pass {type, Type} explicitly. Previously Type was only encoded
%% in the table *name*, so 'bag' scenarios actually ran against a
%% default 'set' table and the bag-specific write paths in the backend
%% were never exercised.
create_tab(Type, Level, MaintainSz) ->
    TabName = tab_name(Type, Level, MaintainSz),
    UserProps = user_props(Level, MaintainSz),
    {atomic, ok} = mnesia:create_table(TabName, [{rdb, [node()]},
                                                 {type, Type},
                                                 {user_properties, UserProps}]),
    {ok, TabName}.
%% Build a per-scenario table name such as
%% t_<module>_<Type>_<Level>_<MaintainSz>, so each run/4 combination
%% gets its own table.
%% NOTE(review): binary_to_atom/2 on constructed input is acceptable
%% here only because the inputs come from the fixed test matrix.
tab_name(Type, Level, MaintainSz) ->
    binary_to_atom(iolist_to_binary(
                     ["t" | [["_", atom_to_list(A)]
                             || A <- [?MODULE, Type, Level, MaintainSz]]]), utf8).
%% User properties for mnesia:create_table/2: the maintain_sz flag and
%% the rocksdb_opts list carrying the on_write_error level under test.
user_props(Level, MaintainSz) ->
    RdbOpts = [{on_write_error, Level}],
    [ {maintain_sz, MaintainSz}
    , {rocksdb_opts, RdbOpts}
    ].
%% Start mnesia with a reset schema; delegates to the shared test lib.
start_mnesia() ->
    mnesia_rocksdb_tlib:start_mnesia(reset),
    ok.
%% Run F() with mnesia_rocksdb_lib mocked so every put/write/delete
%% returns an error tuple, while subscribed to mnesia system events at
%% 'debug' level so the resulting reports reach expect_error/2.
with_mock(Level, Op, Tab, F) ->
    mnesia:subscribe(system),
    mnesia:set_debug_level(debug),
    %% passthrough keeps any unmocked functions working; only the three
    %% write ops below are forced to fail.
    meck:new(mnesia_rocksdb_lib, [passthrough]),
    meck:expect(mnesia_rocksdb_lib, put, 4, {error, some_put_error}),
    meck:expect(mnesia_rocksdb_lib, write, 3, {error, some_write_error}),
    meck:expect(mnesia_rocksdb_lib, delete, 3, {error,some_delete_error}),
    try {Level, Op, Tab, F()} of
        {_, _, _, ok} ->
            ok;
        Other ->
            io:fwrite("OTHER: ~p~n", [Other]),
            ok
    catch
        exit:{{aborted,_},_} ->
            %% An aborted mnesia operation is only expected at the
            %% 'error' level; this match asserts that and fails
            %% (badmatch) if any other level aborted.
            Level = error,
            ok
    after
        %% Always restore the environment, even when the test fails.
        mnesia:set_debug_level(none),
        mnesia:unsubscribe(system),
        meck:unload(mnesia_rocksdb_lib)
    end.
%% Perform one dirty update of the kind under test:
%%   insert/set: a new key 'b'; insert/bag: a second value under the
%%   pre-existing key 'a'; update: rewrite the existing {a, 1};
%%   delete: remove key 'a'.
try_write(insert, set, Tab) ->
    mnesia:dirty_write({Tab, b, 2});
try_write(insert, bag, Tab) ->
    mnesia:dirty_write({Tab, a, 2});
try_write(update, _, Tab) ->
    mnesia:dirty_write({Tab, a, 1});
try_write(delete, _, Tab) ->
    mnesia:dirty_delete({Tab, a}).
%% Wait (up to 1s) for the mnesia system event whose tag corresponds to
%% Level (see rpt_tag/1). mnesia_fatal events carry an extra Core
%% element, hence their own receive clause; the inner Tag = mnesia_fatal
%% match asserts a fatal event only arrives when fatal was configured.
%% Crashes with {expected_error, ...} if no event arrives in time.
expect_error(Level, Tab) ->
    Tag = rpt_tag(Level),
    receive
        {mnesia_system_event, {mnesia_fatal, Fmt, Args, _Core}} ->
            Tag = mnesia_fatal,
            io:fwrite("EVENT(~p, ~p):~n ~s", [Tag, Tab, io_lib:fwrite(Fmt, Args)]),
            ok;
        {mnesia_system_event, {Tag, Fmt, Args}} ->
            io:fwrite("EVENT(~p, ~p):~n ~s", [Tag, Tab, io_lib:fwrite(Fmt, Args)]),
            ok
    after 1000 ->
            error({expected_error, [Level, Tab]})
    end.
%% Map an on_write_error level to the mnesia system-event tag it
%% produces; verbose and debug both surface as mnesia_info.
rpt_tag(Level) ->
    case Level of
        fatal   -> mnesia_fatal;
        error   -> mnesia_error;
        warning -> mnesia_warning;
        verbose -> mnesia_info;
        debug   -> mnesia_info
    end.