Compare commits: uw-export-...master

13 Commits:
e85df7c27c
2078ae7115
8e795da00d
bfccda2c3f
0e4b6d7873
02eaf1ee47
1f6066705c
283e0274e0
847ffc810a
1e60f35dd3
570f31ab3c
cb0d8f3689
5eff27367d

.gitignore (vendored, 2 changes)
@@ -19,7 +19,7 @@ VERSION
 *.aes~
 *.config~
 *.args~
-data/mnesia
+data/*
 _checkouts*/
 tmp/
 rebar3.crashdump

Makefile (4 changes)
@@ -1,10 +1,10 @@
 
-BUILD="_build/default/lib/gmhive_client"
+BUILD=_build/default/lib/gmhive_client
 SCHEMA_OUT=$(BUILD)/priv/gmhc_schema.json
 
 schema:
	ERL_LIBS=_build/default/lib \
	erl -run gmhc_config_schema export $(SCHEMA_OUT) -run init stop
-	scripts/update_readme_json.sh README.md $(SCHEMA_OUT)
+	scripts/update_readme_json.sh "README.md" "$(SCHEMA_OUT)"
 
 

README.md (18 changes)
@@ -82,6 +82,20 @@ only does this inform the client of the network for which the client should
 mine, but it also changes what URL the client will use to find the hive
 server. (For `"testnet"` this will be `"test.gajumining.com"`.)
 
+### Caching of connection data
+
+The client retrieves the Hive Server connection info from gajumining.com.
+This information is account-specific, and may change over time.
+
+To speed up the reconnecting process, should the Hive Server restart,
+this connection information is cached locally for up to 24 hours.
+The normal place for the cached data would be
+`$ZOMP_DIR//var/uwiger/gmhive_client/Vsn/setup.data/mainnet/gmhc_eureka.XXXXXXXX.cache`,
+where `Vsn` is the current version of `gmhive_client`, and `XXXXXXXX` is a fragment
+of the current account ID. The data location can be changed with the command-line
+option `-setup data_dir Dir`.
+
+
 ## JSON Schema
 
 ```json
@@ -89,6 +103,10 @@ server. (For `"testnet"` this will be `"test.gajumining.com"`.)
   "$schema": "http://json-schema.org/draft-04/schema#",
   "additionalProperties": false,
   "properties": {
+    "cache_dir": {
+      "description": "Location of cache, default is 'setup:data_dir()'",
+      "type": "string"
+    },
     "extra_pubkeys": {
       "default": [],
       "description": "Additional worker pubkeys, sharing rewards",

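As an aside, a minimal sketch (not part of the diff) of how the cache location described in the new README text is derived. It simply mirrors the `gmhc_eureka:cache_filename/1` and `cache_dir/0` code added further down in this comparison; the function name `expected_cache_file/2` is illustrative only.

%% Sketch only: derive the eureka cache file for a given network and primary
%% pubkey, following the naming scheme documented in the README above.
expected_cache_file(Network, <<"ak_", PKShort:8/binary, _/binary>>) ->
    Path = filename:join(gmhc_eureka:cache_dir(), Network),
    filename:join(Path, "gmhc_eureka." ++ binary_to_list(PKShort) ++ ".cache").
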
@@ -1,6 +1,6 @@
 {application,gmhive_client,
  [{description,"Gajumaru Hive Client"},
-  {vsn,"0.6.3"},
+  {vsn,"0.10.0"},
   {registered,[]},
   {applications,[kernel,stdlib,sasl,gproc,inets,ssl,enoise,
                  gmconfig,gmhive_protocol,gmhive_worker]},

@@ -1,4 +1,5 @@
 #{ pubkey => <<"ak_zjyvDmMbXafMADYrxDs4uMiYM5zEVJBqWgv619NUQ37Gkwt7z">>
+ , report => <<"progress">>
  , pool_admin => #{url => <<"local">>}
  , pool => #{id => <<"ct_26xqeE3YKmZV8jrks57JSgZRCHSuG4RGzpnvdz6AAiSSTVbJRM">>,
             host => <<"127.0.0.1">>}

@@ -1,5 +1,5 @@
 #{ pubkey => <<"ak_2KAcA2Pp1nrR8Wkt3FtCkReGzAi8vJ9Snxa4PcmrthVx8AhPe8">>
  , pool => #{id => <<"ct_LRbi65kmLtE7YMkG6mvG5TxAXTsPJDZjAtsPuaXtRyPA7gnfJ">>}
- , mining =>
+ , workers =>
      [#{executable => <<"mean15-generic">>}]
 }.

priv/gmhc_schema.json (new file, 139 lines)
{
  "$schema": "http://json-schema.org/draft-04/schema#",
  "additionalProperties": false,
  "properties": {
    "cache_dir": {
      "description": "Location of cache, default is 'setup:data_dir()'",
      "type": "string"
    },
    "extra_pubkeys": {
      "default": [],
      "description": "Additional worker pubkeys, sharing rewards",
      "items": {
        "pattern": "^ak_[1-9A-HJ-NP-Za-km-z]*$",
        "type": "string"
      },
      "type": "array"
    },
    "network": {
      "default": "mainnet",
      "type": "string"
    },
    "pool": {
      "additionalProperties": false,
      "properties": {
        "host": {
          "default": "127.0.0.1",
          "description": "Hostname of hive server",
          "example": "0.0.0.0",
          "type": "string"
        },
        "id": {
          "description": "Pool contract id",
          "pattern": "^ct_[1-9A-HJ-NP-Za-km-z]*$",
          "type": "string"
        },
        "port": {
          "default": 17888,
          "description": "Hive server listen port",
          "minimum": 1,
          "type": "integer"
        }
      },
      "type": "object"
    },
    "pool_admin": {
      "additionalProperties": false,
      "properties": {
        "default_per_network": {
          "additionalProperties": false,
          "properties": {
            "mainnet": {
              "default": "https://gajumining.com/api/workers/{CLIENT_ID}",
              "type": "string"
            },
            "testnet": {
              "default": "https://test.gajumining.com/api/workers/{CLIENT_ID}",
              "type": "string"
            }
          },
          "type": "object"
        },
        "url": {
          "default": "https://test.gajumining.com/api/workers/{CLIENT_ID}",
          "description": "URL of Eureka worker api",
          "type": "string"
        }
      },
      "type": "object"
    },
    "pubkey": {
      "description": "Primary client pubkey",
      "pattern": "^ak_[1-9A-HJ-NP-Za-km-z]*$",
      "type": "string"
    },
    "report": {
      "default": "silent",
      "description": "Progress reporting",
      "enum": [
        "debug",
        "progress",
        "silent"
      ],
      "type": "string"
    },
    "type": {
      "default": "worker",
      "description": "monitor mode can be used to see if a pool is alive",
      "enum": [
        "worker",
        "monitor"
      ],
      "type": "string"
    },
    "workers": {
      "default": [
        { "executable": "mean29-generic" }
      ],
      "description": "Definitions of workers' configurations. If no worker are configured one worker is used as default, i.e. 'mean29-generic' executable without any extra args.",
      "items": {
        "additionalProperties": false,
        "properties": {
          "executable": {
            "default": "mean29-generic",
            "description": "Executable binary of the worker. Can be a fully qualified path, \nbut the software may apply default logic to locate a plain basename.",
            "type": "string"
          },
          "extra_args": {
            "default": "",
            "description": "Extra arguments to pass to the worker executable binary. The safest choice is specifying no arguments i.e. empty string.",
            "type": "string"
          },
          "hex_encoded_header": {
            "default": false,
            "description": "Hexadecimal encode the header argument that is send to the worker executable. CUDA executables expect hex encoded header.",
            "type": "boolean"
          },
          "instances": {
            "description": "Instances used by the worker in case of Multi-GPU mining. Numbers on the configuration list represent GPU devices that are to be addressed by the worker.",
            "example": [0,1,2,3],
            "items": { "type": "integer" },
            "minItems": 1,
            "type": "array"
          },
          "repeats": {
            "default": 1,
            "description": "Number of tries to do in each worker context - WARNING: it should be set so the worker process runs for 3-5s or else the node risk missing out on new micro blocks.",
            "type": "integer"
          }
        },
        "required": [
          "executable"
        ],
        "type": "object"
      },
      "type": "array"
    }
  },
  "type": "object"
}

@@ -11,7 +11,7 @@
  {enoise, {git, "https://git.qpq.swiss/QPQ-AG/enoise.git", {ref, "029292817e"}}},
  {gmhive_protocol,
   {git, "https://git.qpq.swiss/QPQ-AG/gmhive_protocol.git",
-   {ref, "818ce33"}}},
+   {ref, "8d4652a"}}},
  {gmhive_worker, {git, "https://git.qpq.swiss/QPQ-AG/gmhive_worker", {ref, "cabd104114"}}},
  {gmconfig, {git, "https://git.qpq.swiss/QPQ-AG/gmconfig.git",
   {ref, "38620ff9e2"}}},

@@ -25,7 +25,7 @@
   1},
  {<<"gmhive_protocol">>,
   {git,"https://git.qpq.swiss/QPQ-AG/gmhive_protocol.git",
-       {ref,"818ce33cc1dec74c020515be48fb548a5207befd"}},
+       {ref,"8d4652a79a2ad8f51e1fe560c15c698d4c92485c"}},
   0},
  {<<"gmhive_worker">>,
   {git,"https://git.qpq.swiss/QPQ-AG/gmhive_worker",

@@ -1,6 +1,6 @@
 %% -*- mode: erlang; erlang-indent-level: 4; indent-tabs-mode: nil -*-
 -module(gmhc_app).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -behaviour(application).
 
@@ -17,11 +17,11 @@
 -spec start([{atom(), any()}]) -> {ok, [atom()]} | {error, any()}.
 start(Opts) ->
     application:load(gmhive_client),
-    {error,_} = application:stop(gmhive_client),
+    _ = application:stop(gmhive_client),
     _ = lists:foreach(fun({K, V}) ->
                               application:set_env(gmhive_client, K, V)
                       end, Opts),
-    application:ensure_all_started(gmhive_client).
+    application:ensure_all_started(gmhive_client, permanent).
 
 start(_StartType, _StartArgs) ->
     set_things_up(),

@@ -1,5 +1,5 @@
 -module(gmhc_config).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([ load_config/0
         , get_config/1

@@ -1,5 +1,5 @@
 -module(gmhc_config_schema).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([ schema/0
         , to_json/0
@@ -51,6 +51,7 @@ schema() ->
      , pool => pool()
      , pool_admin => pool_admin()
      , workers => workers()
+     , cache_dir => str(#{description => <<"Location of cache, default is 'setup:data_dir()'">>})
      , report => str(#{ enum => [<<"debug">>, <<"progress">>, <<"silent">>]
                       , default => <<"silent">>
                       , description => <<"Progress reporting">> })

@@ -1,5 +1,5 @@
 -module(gmhc_connector).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -behaviour(gen_server).
 
@@ -56,7 +56,11 @@
           id :: non_neg_integer()
         , auto_connect = true :: boolean()
         , econn
+        , connected = false :: boolean()
+        , status = disconnected :: disconnected | connecting | connected
+        , reconnect = true :: boolean()
         , reconnect_timer :: timer_ref() | 'undefined'
+        , recache_timer :: reference() | 'undefined'
         , awaiting_connect = [] :: list()
         , protocol :: binary() | 'undefined'
         , version :: binary() | 'undefined'
@@ -141,6 +145,9 @@ init(#{id := Id} = Opts) when is_map(Opts) ->
         {ok, S} ->
             ?LOG_DEBUG("Initial connect succeeded", []),
             S;
+        {error, rejected} ->
+            ?LOG_WARNING("Connection rejected; will not retry", []),
+            S0#st{econn = undefined, reconnect = false};
         {error, _} = Error ->
             ?LOG_WARNING("Could not connect to core server: ~p", [Error]),
             start_reconnect_timer(S0#st{econn = undefined})
@@ -179,6 +186,7 @@ handle_call({connect, Opts}, From, #st{awaiting_connect = Waiters} = S) ->
             end,
             S1 = start_reconnect_timer(
                    S#st{ auto_connect = true
+                       , reconnect = true
                        , awaiting_connect = Waiters1 }, Opts),
             {noreply, S1};
         false ->
@@ -239,12 +247,21 @@ handle_info({timeout, TRef, {reconnect, Opts}}, #st{ reconnect_timer = {TRef, _,
     end;
 handle_info({tcp_closed, _Port}, #st{} = S) ->
     ?LOG_DEBUG("got tcp_closed", []),
-    disconnected(S#st.id),
-    S1 = case S#st.auto_connect of
-             true -> start_reconnect_timer(S#st{econn = undefined});
-             false -> S#st{econn = undefined}
+    S1 = disconnected(S#st.id, S),
+    S2 = case S1#st.auto_connect of
+             true -> start_reconnect_timer(S1#st{econn = undefined});
+             false -> S1#st{econn = undefined}
          end,
-    {noreply, S1};
+    {noreply, S2};
+handle_info({timeout, _, recache}, #st{opts = Opts, econn = EConn} = S0) ->
+    S = S0#st{recache_timer = undefined},
+    case (EConn =/= undefined) andalso S#st.connected of
+        false ->
+            {noreply, S};
+        true ->
+            cache_eureka_info(Opts),
+            {noreply, ensure_recache_timer(S)}
+    end;
 handle_info(Msg, S) ->
     ?LOG_DEBUG("Discarding msg (auto_connect=~p): ~p", [S#st.auto_connect, Msg]),
     {noreply, S}.
@@ -264,6 +281,8 @@ code_change(_FromVsn, S, _Extra) ->
 try_connect(Opts, S) ->
     try try_connect_(Opts, S)
     catch
+        error:rejected:_ ->
+            {error, rejected};
         error:E:T ->
             ?LOG_ERROR("Unexpected error connecting: ~p / ~p", [E, T]),
             {error, E}
@@ -277,8 +296,9 @@ try_connect_(Opts0, S) ->
     case try_noise_connect(maps:merge(Opts0, PoolOpts)) of
         {ok, EConn, Opts1} ->
             S1 = protocol_connect(Opts1, S#st{ econn = EConn
+                                             , status = connecting
                                              , reconnect_timer = undefined }),
-            {ok, S1};
+            {ok, S1#st{status = connected}};
         {error, _} = Error ->
             Error
     end
@@ -286,12 +306,12 @@ try_connect_(Opts0, S) ->
 
 eureka_get_host_port() ->
     case gmhc_eureka:get_pool_address() of
-        #{<<"address">> := Host,
-          <<"port">> := Port,
-          <<"pool_id">> := PoolId} ->
-            #{host => binary_to_list(Host),
+        {ok, #{host := Host,
+               port := Port,
+               pool_id := PoolId}} ->
+            #{host => unicode:characters_to_list(Host),
               port => Port,
-              pool_id => binary_to_list(PoolId)};
+              pool_id => unicode:characters_to_list(PoolId)};
         {error, _} = Error ->
             Error
     end.
@@ -333,6 +353,8 @@ default_tcp_opts() ->
 enoise_opts() ->
     [{noise, <<"Noise_NN_25519_ChaChaPoly_BLAKE2b">>}].
 
+start_reconnect_timer(#st{reconnect = false} = S) ->
+    S;
 start_reconnect_timer(#st{} = S) ->
     start_reconnect_timer(S, #{}).
 
@@ -344,11 +366,18 @@ start_reconnect_timer(#st{} = S, Opts) ->
         false ->
             ?LOG_DEBUG("starting reconnect timer ...", []),
             TRef = start_timer(1000, Opts),
-            S#st{reconnect_timer = {TRef, 10, 1000, Opts}}
+            S#st{reconnect_timer = {TRef, 10, 8000 + gmhc_lib:rand(3000), Opts}}
     end.
 
+restart_reconnect_timer(#st{reconnect = false} = S) ->
+    cancel_reconnect_timer(S);
 restart_reconnect_timer(#st{reconnect_timer = {_, 0, T, Opts}} = S) ->
-    NewT = max(T * 2, ?MAX_RETRY_INTERVAL),
+    NewT = max(T + 5000 + gmhc_lib:rand(1000), ?MAX_RETRY_INTERVAL),
+    if (NewT > T andalso NewT =:= ?MAX_RETRY_INTERVAL) ->
+            gmhc_eureka:invalidate_cache();
+       true ->
+            ok
+    end,
     TRef = start_timer(NewT, Opts),
     S#st{reconnect_timer = {TRef, 10, NewT, Opts}};
 restart_reconnect_timer(#st{reconnect_timer = {_, N, T, Opts}} = S) ->
@@ -373,11 +402,25 @@ notify_deadline(D, #st{awaiting_connect = Waiters} = S) ->
           end, [], Waiters),
     S#st{awaiting_connect = Waiters1}.
 
-notify_connected(#st{id = Id, awaiting_connect = Waiters} = S) ->
+cache_eureka_info(Opts) ->
+    gmhc_eureka:cache_good_address(maps:with([host, port, pool_id], Opts)).
+
+notify_connected(#st{id = Id, awaiting_connect = Waiters, opts = Opts} = S) ->
     gmhc_events:publish(connected, #{id => Id}),
     [gen_server:reply(From, ok) || {From, _} <- Waiters],
     gmhc_handler:pool_connected(S#st.id, S#st.opts),
-    S#st{awaiting_connect = []}.
+    cache_eureka_info(Opts),
+    gmhc_connectors_sup:add_restart_info(Id, Opts),
+    ensure_recache_timer(S#st{awaiting_connect = []}).
+
+ensure_recache_timer(#st{recache_timer = T} = S) ->
+    case T of
+        undefined ->
+            TRef = erlang:start_timer(1*60*1000, self(), recache),
+            S#st{recache_timer = TRef};
+        _ when is_reference(T) ->
+            S
+    end.
 
 cancel_reconnect_timer(#st{reconnect_timer = T} = S) ->
     case T of
@@ -401,17 +444,32 @@ protocol_connect(Opts, #st{econn = EConn} = S) ->
     Type = to_atom(opt(type, Opts, [<<"type">>])),
     RId = erlang:unique_integer(),
     Vsns = gmhp_msgs:versions(),
+    Client = client_name(),
     Protocols = gmhp_msgs:protocols(hd(Vsns)),
     ConnectReq = #{ protocols => Protocols
                   , versions => Vsns
                   , pool_id => PoolId
                   , pubkey => Pubkey
                   , extra_pubkeys => Extra
+                  , client => Client
                   , type => Type
                   , nonces => gmhc_server:total_nonces()
                   , signature => ""},
     ?LOG_DEBUG("ConnectReq = ~p", [ConnectReq]),
-    Msg = gmhp_msgs:encode_connect(ConnectReq, RId),
+    try gmhp_msgs:encode_connect(ConnectReq, RId) of
+        Msg ->
+            send_connect(EConn, RId, Msg, ConnectReq, Opts, S)
+    catch error:Error ->
+            ErrMsg = unicode:characters_to_binary(io_lib:fwrite("~p", [Error])),
+            disconnected(S#st.id, #{error =>
+                                        #{code => gmhp_msgs:error_code(invalid_input),
+                                          message => ErrMsg}}, S)
+    end.
+
+send_connect(EConn, RId, Msg, #{pubkey := Pubkey,
+                                extra_pubkeys := Extra,
+                                pool_id := PoolId,
+                                type := Type}, Opts, S) ->
     enoise:send(EConn, Msg),
     receive
         {noise, EConn, Data} ->
@@ -420,20 +478,41 @@ protocol_connect(Opts, #st{econn = EConn} = S) ->
               , result := #{connect_ack := #{ protocol := P
                                             , version := V }}
               }} ->
-                connected(S#st.id, Type),
+                S1 = connected(S#st.id, Type, S),
                 Opts1 = Opts#{ pubkey => Pubkey
                              , extra => Extra
                              , pool_id => PoolId
                              , type => Type },
-                notify_connected(S#st{protocol = P, version = V, opts = Opts1});
-            #{error := #{message := Msg}} ->
-                ?LOG_ERROR("Connect error: ~s", [Msg]),
-                error(protocol_connect)
+                notify_connected(S1#st{protocol = P, version = V, opts = Opts1});
+            #{error := #{code := _, message := ErrMsg}} = ErrReply ->
+                ?LOG_ERROR("Connect error: ~s", [ErrMsg]),
+                %% TODO: fix the flow so that we send one disconnected event,
+                %% and set the reconnect in the right place. For now, stuff
+                %% the `reconnect = false`.
+                disconnected(S#st.id, ErrReply, S#st{reconnect = false}),
+                gmhc_eureka:invalidate_cache(),
+                error(rejected)
         end
     after 10000 ->
+            gmhc_eureka:invalidate_cache(),
             error(protocol_connect_timeout)
     end.
 
+client_name() ->
+    MyStr = app_string(gmhive_client),
+    case app_string(gajumine) of
+        <<>> -> MyStr;
+        GMStr -> <<MyStr/binary,",",GMStr/binary>>
+    end.
+
+app_string(App) ->
+    case application:get_key(App, vsn) of
+        undefined ->
+            <<>>;
+        {ok, Vsn} ->
+            unicode:characters_to_binary([atom_to_binary(App),"-",Vsn])
+    end.
+
 to_bin(A) when is_atom(A) ->
     atom_to_binary(A, utf8);
 to_bin(S) ->
@@ -443,12 +522,18 @@ to_atom(A) when is_atom(A) -> A;
 to_atom(S) ->
     binary_to_existing_atom(iolist_to_binary(S), utf8).
 
-connected(Id, Type) when Type==worker; Type==monitor ->
-    gmhc_server:connected(Id, Type).
+connected(Id, Type, S) when Type==worker; Type==monitor ->
+    gmhc_server:connected(Id, Type),
+    S#st{connected = true}.
 
-disconnected(Id) ->
-    gmhc_events:publish(disconnected, #{id => Id}),
-    gmhc_server:disconnected(Id).
+disconnected(Id, S) ->
+    disconnected(Id, #{}, S).
+
+disconnected(_, _, #st{status = disconnected} = S) -> S;
+disconnected(Id, Msg, #st{reconnect = Bool} = S) ->
+    gmhc_events:publish(disconnected, Msg#{id => Id, reconnecting => Bool}),
+    gmhc_server:disconnected(Id),
+    S#st{connected = false, status = disconnected}.
 
 opt_autoconnect(#{auto_connect := Bool}) when is_boolean(Bool) ->
     Bool;

@@ -1,15 +1,37 @@
 -module(gmhc_connectors_sup).
--vsn("0.6.1").
+-vsn("0.8.3").
 -behavior(supervisor).
 
 -export([ start_link/0
         , init/1 ]).
 
 -export([ start_first_connector/0
-        , start_connector/1]).
+        , start_connector/1
+        , add_restart_info/2 ]).
+
+-export([ sort_restart_info/1 ]).
+
+-include_lib("kernel/include/logger.hrl").
 
 start_link() ->
-    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+    case supervisor:start_link({local, ?MODULE}, ?MODULE, []) of
+        {ok, _} = Ok ->
+            RI0 = gmhc_watchdog:get_restart_info(),
+            ?LOG_ERROR("Restart info: ~p", [RI0]),
+            RI = sort_restart_info(RI0),
+            RemoveIds = maps:keys(maps:without(maps:keys(RI), RI0)),
+            gmhc_watchdog:remove_restart_info(RemoveIds),
+            ?LOG_ERROR("Sorted restart info: ~p", [RI]),
+            maps:foreach(fun restart_connector/2, RI),
+            Ok;
+        Other ->
+            Other
+    end.
+
+restart_connector({?MODULE, _}, Opts) ->
+    start_connector(Opts);
+restart_connector(_, _) ->
+    ok.
 
 start_first_connector() ->
     start_connector(#{}).
@@ -21,11 +43,14 @@ start_connector(Opts0) ->
           end,
     supervisor:start_child(?MODULE, [Opts]).
 
+add_restart_info(Id, Opts) ->
+    gmhc_watchdog:note_started({?MODULE, Id}, Opts).
+
 init([]) ->
     Mod = gmhc_connector,
     SupFlags = #{ strategy => simple_one_for_one
-                , intensity => 3
-                , period => 10 },
+                , intensity => 5
+                , period => 60 },
     ChildSpecs = [ #{ id => Mod
                     , start => {Mod, start_link, []}
                     , type => worker
@@ -33,3 +58,13 @@ init([]) ->
                     , shutdown => 5000
                     , modules => [Mod] } ],
     {ok, {SupFlags, ChildSpecs}}.
+
+sort_restart_info(RI) ->
+    L = lists:sort(
+          [{Id, {T,H,P}, I}
+           || {{?MODULE,_} = Id, #{type := T, host := H, port := P} = I} <- maps:to_list(RI)]),
+    ConnTypes = [CT || {_, CT, _} <- lists:ukeysort(2, L)],
+    lists:foldl(fun(CT, Acc) ->
+                        {Id, _, I} = lists:keyfind(CT, 2, L),
+                        Acc#{Id => I}
+                end, #{}, ConnTypes).

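A quick illustration of what `sort_restart_info/1` above keeps: one entry per distinct `{type, host, port}`, preferring the lowest-sorting id. The data below is hypothetical and only shows the three keys the function pattern-matches on.

%% Hypothetical input: three recorded connectors, two pointing at the same
%% pool endpoint. sort_restart_info/1 keeps ids 1 and 3 only.
RI0 = #{ {gmhc_connectors_sup, 1} => #{type => worker,  host => "10.0.0.1", port => 17888},
         {gmhc_connectors_sup, 2} => #{type => worker,  host => "10.0.0.1", port => 17888},
         {gmhc_connectors_sup, 3} => #{type => monitor, host => "10.0.0.2", port => 17888} },
RI = gmhc_connectors_sup:sort_restart_info(RI0).
%% => #{ {gmhc_connectors_sup,1} => #{type => worker,  host => "10.0.0.1", port => 17888},
%%       {gmhc_connectors_sup,3} => #{type => monitor, host => "10.0.0.2", port => 17888} }
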
@@ -1,5 +1,5 @@
 -module(gmhc_counters).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([ initialize/0 ]).
 

@@ -1,20 +1,136 @@
 -module(gmhc_eureka).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([get_pool_address/0]).
+-export([cache_good_address/1,
+         invalidate_cache/0,
+         cached_address/0,
+         cache_filename/0,
+         cache_dir/0]).
 
 -include_lib("kernel/include/logger.hrl").
 -include("gmhc_events.hrl").
 
 get_pool_address() ->
+    case cached_address() of
+        {ok, _} = Ok -> Ok;
+        {error, _} ->
+            get_pool_address_()
+    end.
+
+cached_address() ->
+    I0 = cache_info(),
+    CacheF = cache_filename(I0),
+    ?LOG_DEBUG("Eureka cache filename: ~p", [CacheF]),
+    case file:read_file(CacheF) of
+        {ok, Bin} ->
+            NowTS = erlang:system_time(seconds),
+            OldestTS = NowTS - 24*60*60,
+            try binary_to_term(Bin) of
+                #{ ts := TS
+                 , network := N
+                 , pubkey := PK
+                 , host := _
+                 , port := _
+                 , pool_id := _} = Map when N == map_get(network, I0),
+                                            PK == map_get(pubkey, I0) ->
+                    if TS >= OldestTS ->
+                            Result = maps:remove(ts, Map),
+                            ?LOG_DEBUG("Cached eureka info: ~p", [Result]),
+                            {ok, Result};
+                       true ->
+                            {error, outdated}
+                    end;
+                Other ->
+                    {error, {invalid_cache_term, Other}}
+            catch
+                error:E ->
+                    {error, {invalid_cache_data, E}}
+            end;
+        {error, _} = Err ->
+            Err
+    end.
+
+cache_good_address(#{host := _,
+                     port := _,
+                     pool_id := _} = I0) ->
+    CacheInfo = cache_info(I0),
+    CacheF = cache_filename(CacheInfo),
+
+    ToCache = CacheInfo#{ts => erlang:system_time(seconds)},
+    case filelib:ensure_dir(CacheF) of
+        ok ->
+            case file:write_file(CacheF, term_to_binary(ToCache)) of
+                ok ->
+                    {ok, ToCache};
+                {error, _} = Err ->
+                    ?LOG_DEBUG("Couldn't cache eureka in ~p: ~p", [CacheF, Err]),
+                    Err
+            end;
+        {error, _} = Err ->
+            ?LOG_ERROR("Cannot save cached info to ~s", [CacheF]),
+            Err
+    end.
+
+invalidate_cache() ->
+    CacheF = cache_filename(),
+    case file:delete(CacheF) of
+        ok ->
+            ?LOG_DEBUG("Eureka cache file removed (~p)", [CacheF]),
+            ok;
+        {error, _} = Err ->
+            ?LOG_DEBUG("Couldn't remove Eureka cache (~p): ~p", [CacheF, Err]),
+            Err
+    end.
+
+cache_info(#{ host := Addr
+            , port := Port
+            , pool_id := PoolId }) ->
+    I0 = cache_info(),
+    I0#{ host => unicode:characters_to_binary(Addr)
+       , port => Port
+       , pool_id => unicode:characters_to_binary(PoolId)}.
+
+cache_info() ->
+    Pubkey = gmhc_config:get_config([<<"pubkey">>]),
+    Network = gmhc_config:get_config([<<"network">>]),
+    #{ pubkey => Pubkey
+     , network => Network }.
+
+cache_filename() ->
+    cache_filename(cache_info()).
+
+cache_filename(#{network := Network, pubkey := Pubkey}) ->
+    Path = filename:join(cache_dir(), Network),
+    <<"ak_", PKShort:8/binary, _/binary>> = Pubkey,
+    filename:join(Path, "gmhc_eureka." ++ binary_to_list(PKShort) ++ ".cache").
+
+cache_dir() ->
+    case gmconfig:find_config([<<"cache_dir">>]) of
+        {ok, D} ->
+            D;
+        undefined ->
+            case setup_zomp:is_zomp_context() of
+                true ->
+                    cache_dir_zomp();
+                false ->
+                    filename:join(setup:data_dir(), "gmhive.cache")
+            end
+    end.
+
+cache_dir_zomp() ->
+    #{package_id := {Realm, App, _}} = zx_daemon:meta(),
+    filename:join(zx_lib:ppath(var, {Realm, App}), "gmhive.cache").
+
+get_pool_address_() ->
     case gmconfig:find_config([<<"pool_admin">>, <<"url">>], [user_config]) of
         {ok, URL0} ->
             case expand_url(URL0) of
                 <<"local">> ->
-                    #{<<"address">> => <<"127.0.0.1">>,
-                      <<"port">> => gmconfig:get_config(
+                    {ok, #{host => <<"127.0.0.1">>,
+                           port => gmconfig:get_config(
                                       [<<"pool">>, <<"port">>], [schema_default]),
-                      <<"pool_id">> => gmhc_config:get_config([<<"pool">>, <<"id">>]) };
+                           pool_id => gmhc_config:get_config([<<"pool">>, <<"id">>]) }};
                 URL ->
                     ?LOG_INFO("Trying to connect to ~p", [URL]),
                     connect1(URL)
@@ -49,9 +165,13 @@ connect1(URL0) ->
             Error
     end.
 
-get_host_port(Data) ->
+get_host_port(#{ <<"address">> := Addr
+               , <<"port">> := Port
+               , <<"pool_id">> := PoolId } = Data) ->
     ?LOG_DEBUG("Data = ~p", [Data]),
-    maps:with([<<"address">>, <<"port">>, <<"pool_id">>], Data).
+    {ok, #{ host => Addr
+          , port => Port
+          , pool_id => PoolId }}.
 
 request(get, URL) ->
     case request(get, URL, []) of

@@ -1,5 +1,5 @@
 -module(gmhc_events).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([subscribe/1,
          ensure_subscribed/1,

@@ -1,5 +1,5 @@
 -module(gmhc_handler).
--vsn("0.6.1").
+-vsn("0.8.3").
 -behavior(gen_server).
 
 -export([ start_link/0
@@ -28,7 +28,7 @@
 
 -record(st, {pools = [], opts = #{}}).
 
--define(CALL_TIMEOUT, 5000).
+-define(CALL_TIMEOUT, 10_000).
 
 -include_lib("kernel/include/logger.hrl").
 

src/gmhc_lib.erl (new file, 9 lines)
-module(gmhc_lib).
-vsn("0.8.3").

-export([ rand/1 ]).


rand(Range) ->
    <<Rx:32>> = crypto:strong_rand_bytes(4),
    Range - (Rx rem Range).

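Worth noting: `rand(Range)` returns an integer in `1..Range` (since `Rx rem Range` lies in `0..Range-1`), which is what the jittered reconnect timers in `gmhc_connector` above rely on. Sketch only:

%% Sketch: a jittered reconnect delay in 8001..11000 ms, matching the
%% expression used in gmhc_connector:start_reconnect_timer/2.
Delay = 8000 + gmhc_lib:rand(3000).
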
@@ -1,5 +1,5 @@
 -module(gmhc_server).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -behaviour(gen_server).
 
@@ -92,7 +92,9 @@ handle_call({connected, Id, Type}, {Pid,_}, #st{connected = Conn} = S) ->
              monitor ->
                  stop_workers(S1#st.workers), % shouldn't be any running
                  S1#st{workers = [], working = false};
-             worker -> S1#st{working = true}
+             worker ->
+                 gmhc_watchdog:watch(5*60_000, 1), %% 5 minutes, one-shot
+                 S1#st{working = true}
          end,
     {reply, ok, S2};
 handle_call(_Req, _From, S) ->
@@ -126,6 +128,7 @@ handle_cast({from_pool, #{via := Connector,
 handle_cast({disconnected, Id}, #st{connected = Conn} = S) ->
     ?LOG_DEBUG("disconnected: ~p", [Id]),
     Conn1 = maps:remove(Id, Conn),
+    gmhc_watchdog:unwatch(),
     S1 = if map_size(Conn1) == 0 ->
                 Ws = stop_workers(S#st.workers),
                 S#st{connected = Conn1, workers = Ws};
@@ -162,16 +165,11 @@ handle_info({'EXIT', Pid, Reason}, #st{ workers = Workers
             {noreply, S}
     end;
 handle_info({timeout, _, check_workers}, #st{workers = Workers} = S) ->
-    case [W || #worker{cand = undefined} = W <- Workers] of
-        [] ->
-            {noreply, S};
-        Idle ->
-            S1 = maybe_request_nonces(S),
-            S2 = lists:foldl(fun(W, Sx) ->
-                                     maybe_restart_worker(W, Sx)
-                             end, S1, Idle),
-            {noreply, S2}
-    end;
+    S1 = maybe_request_nonces(S),
+    S2 = lists:foldl(fun(W, Sx) ->
+                             maybe_restart_worker(W, Sx)
+                     end, S1, Workers),
+    {noreply, S2};
 handle_info(Msg, St) ->
     ?LOG_DEBUG("Unknown msg: ~p", [Msg]),
     {noreply, St}.
@@ -216,7 +214,6 @@ maybe_request_nonces(#st{ candidate = #{via := Via, seq := Seq, nonces := Nonces
                         , nonces = N} = S) when ?CONNECTED(S) ->
     case Nonces == [] of
         true ->
-            %% ?LOG_DEBUG("Request more nonces, Seq = ~p, N = ~p", [Seq, N]),
             Res = gmhc_handler:call(#{via => Via,
                                       get_nonces => #{ seq => Seq
                                                      , n => N }}),
@@ -230,7 +227,7 @@ maybe_request_nonces(S) ->
 nonces_result(#{nonces := #{seq := Seq, nonces := Nonces}}, Seq0, S) ->
     case Seq == Seq0 of
         true ->
-            %% ?LOG_DEBUG("Got nonces = ~p", [Nonces]),
+            wd_ping(),
             #st{candidate = Cand} = S,
             S#st{candidate = Cand#{nonces => Nonces}};
         false ->
@@ -240,10 +237,21 @@ nonces_result(#{nonces := #{seq := Seq, nonces := Nonces}}, Seq0, S) ->
 nonces_result({error, Reason}, Seq0, S) ->
     ?LOG_DEBUG("Got error on nonce request: ~p", [Reason]),
     Workers = stop_workers_for_seq(Seq0, S#st.workers),
+    case Reason of
+        {timeout, _} ->
+            Timeout = retry_timeout(1000, 3000),
+            erlang:start_timer(Timeout, self(), check_workers);
+        _ ->
+            ok
+    end,
     S#st{workers = Workers}.
 
+retry_timeout(Floor, Range) ->
+    Floor + gmhc_lib:rand(Range).
+
 handle_worker_result({worker_result, Result}, W, S) ->
     %% ?LOG_DEBUG("worker result: ~p", [Result]),
+    wd_ping(),
     case Result of
         {solutions, Solutions} ->
             {Cont, S1} = report_solutions_(Solutions, W, S),
@@ -435,3 +443,6 @@ worker_result(Pid, Result) ->
 decode_candidate_hash(#{candidate := C} = Cand) ->
     {ok, Hash} = gmser_api_encoder:safe_decode(bytearray, C),
     Cand#{candidate := Hash}.
+
+wd_ping() ->
+    gmhc_watchdog:ping().

@@ -1,6 +1,6 @@
 %% -*- mode: erlang; erlang-indent-level: 4; indent-tabs-mode: nil -*-
 -module(gmhc_sup).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -behaviour(supervisor).
 
@@ -15,12 +15,13 @@ start_link() ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).
 
 init([]) ->
-    ChildSpecs = [ worker(gmhc_server)
+    ChildSpecs = [ worker(gmhc_watchdog)
+                 , worker(gmhc_server)
                  , worker(gmhc_handler)
                  , supervisor(gmhc_connectors_sup) ],
-    SupFlags = #{ strategy => one_for_one
-                , intensity => 1
-                , period => 5
+    SupFlags = #{ strategy => rest_for_one
+                , intensity => 5 %% We really want the hive client to sort itself out
+                , period => 5*60 %% Timemout issues can happen infrequently
                 , auto_shutdown => never },
     {ok, {SupFlags, ChildSpecs}}.
 

src/gmhc_watchdog.erl (new file, 167 lines)
-module(gmhc_watchdog).
-behavior(gen_server).

-export([ watch/2
        , unwatch/0
        , ping/0 ]).

-export([ note_started/2
        , remove_restart_info/1
        , get_restart_info/0 ]).

-export([ start_link/0
        , init/1
        , handle_call/3
        , handle_cast/2
        , handle_info/2
        , terminate/2
        , code_change/3 ]).

-record(st, {services = #{}}).
-record(svc, { n = 5 :: pos_integer()
             , n0 = 5 :: pos_integer()
             , interval = 5000 :: pos_integer()
             , tref :: reference()
             , mref :: reference()
             }).

-include_lib("kernel/include/logger.hrl").

watch(Interval, N) ->
    gen_server:call(?MODULE, {watch, self(), Interval, N}).

unwatch() ->
    gen_server:cast(?MODULE, {unwatch, self()}).

ping() ->
    gen_server:cast(?MODULE, {ping, self()}).

note_started(Id, Info) ->
    gen_server:call(?MODULE, {note_started, Id, Info}).

get_restart_info() ->
    gen_server:call(?MODULE, get_restart_info).

remove_restart_info([]) ->
    ok;
remove_restart_info(IDs) ->
    gen_server:call(?MODULE, {remove_restart_info, IDs}).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

init([]) ->
    {ok, #st{}}.

handle_call({note_started, Id, Info}, _From, S) ->
    update_pt(Id, Info),
    {reply, ok, S};
handle_call({remove_restart_info, IDs}, _From, S) ->
    remove_restart_info_(IDs),
    {reply, ok, S};
handle_call(get_restart_info, _From, S) ->
    {reply, get_pt(), S};
handle_call({watch, Pid, Interval, N}, _From, S) ->
    {reply, ok, add_watch(Pid, Interval, N, S)};
handle_call(_Req, _From, S) ->
    {reply, {error, unknown_method}, S}.

handle_cast({ping, Pid}, S) ->
    {noreply, reset_watch(Pid, S)};
handle_cast({unwatch, Pid}, S) ->
    {noreply, delete_watch(Pid, S)};
handle_cast(Msg, S) ->
    ?LOG_DEBUG("Unknown cast: ~p", [Msg]),
    {noreply, S}.

handle_info({timeout, TRef, Pid}, S) ->
    ?LOG_INFO("Timeout for pid ~p", [Pid]),
    {noreply, ping_timeout(Pid, TRef, S)};
handle_info({'DOWN', _, process, Pid, _}, S) ->
    {noreply, delete_watch(Pid, S)};
handle_info(Msg, S) ->
    ?LOG_DEBUG("Unknown msg: ~p", [Msg]),
    {noreply, S}.

terminate(_, _) ->
    ok.

code_change(_FromVsn, S, _Extra) ->
    {ok, S}.

add_watch(Pid, Interval, N, #st{services = Svcs} = S) ->
    MRef = erlang:monitor(process, Pid),
    Svc0 = #svc{ interval = Interval
               , mref = MRef
               , n = N
               , n0 = N},
    Svc = start_timer(Pid, Svc0),
    S#st{services = Svcs#{Pid => Svc}}.

reset_watch(Pid, #st{services = Svcs} = S) ->
    case maps:find(Pid, Svcs) of
        {ok, #svc{ n0 = N0 } = Svc} ->
            Svc1 = restart_timer(Pid, Svc#svc{n = N0}),
            S#st{services = Svcs#{Pid := Svc1}};
        error ->
            S
    end.

delete_watch(Pid, #st{services = Svcs} = S) ->
    case maps:find(Pid, Svcs) of
        {ok, #svc{tref = TRef, mref = MRef}} ->
            erlang:cancel_timer(TRef),
            erlang:demonitor(MRef),
            S#st{services = maps:remove(Pid, Svcs)};
        error ->
            S
    end.

ping_timeout(Pid, TRef, #st{services = Svcs} = S) ->
    case maps:find(Pid, Svcs) of
        {ok, #svc{ n = N, tref = TRef} = Svc} ->
            N1 = N-1,
            if N1 =< 0 ->
                    ?LOG_ERROR("Will exit Pid ~p", [Pid]),
                    exit(Pid, kill),
                    S#st{services = maps:remove(Pid, Svcs)};
               true ->
                    Svc1 = restart_timer(Pid, Svc#svc{n = N1}),
                    S#st{services = Svcs#{Pid := Svc1}}
            end;
        {ok, _} ->
            ?LOG_DEBUG("Timeout didn't match TRef - ignoring", []),
            S;
        _ ->
            S
    end.

start_timer(Pid, #svc{interval = T} = Svc) ->
    TRef = erlang:start_timer(T, self(), Pid),
    Svc#svc{tref = TRef}.

restart_timer(Pid, #svc{tref = TRef} = Svc) ->
    erlang:cancel_timer(TRef),
    start_timer(Pid, Svc#svc{tref = undefined}).

update_pt(Id, Info) ->
    Pt = get_pt(),
    put_pt(Pt#{Id => Info}).

remove_restart_info_(IDs) ->
    RI = get_pt(),
    case maps:without(IDs, RI) of
        RI ->
            ok;
        NewRI ->
            put_pt(NewRI)
    end.

get_pt() ->
    persistent_term:get(pt_key(), #{}).

put_pt(Pt) ->
    persistent_term:put(pt_key(), Pt).

pt_key() ->
    {?MODULE, restart_info}.

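A minimal usage sketch of the watchdog API above (assumptions: the watchdog is running under `gmhc_sup` as wired in earlier, and `do_mining_step/0` is a hypothetical placeholder). A watched process that stops pinging for the allowed number of intervals is killed, letting the supervisor restart it.

%% Sketch only: register with the same parameters gmhc_server uses above
%% ("5 minutes, one-shot"), ping on every unit of progress, and unregister
%% on shutdown. do_mining_step/0 is a hypothetical placeholder.
start_work() ->
    ok = gmhc_watchdog:watch(5 * 60_000, 1),
    work_loop().

work_loop() ->
    do_mining_step(),
    gmhc_watchdog:ping(),
    work_loop().

stop_work() ->
    gmhc_watchdog:unwatch().
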
@@ -8,7 +8,7 @@
 %%%-------------------------------------------------------------------
 
 -module(gmhc_workers).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([
          get_worker_configs/0

@@ -1,5 +1,5 @@
 -module(gmhive_client).
--vsn("0.6.1").
+-vsn("0.8.3").
 
 -export([ connect/1
         , disconnect/1

@@ -2,15 +2,15 @@
 {type,app}.
 {modules,[]}.
 {prefix,"gmhc"}.
-{desc,"Gajumaru Hive Client"}.
 {author,"Ulf Wiger, QPQ AG"}.
-{package_id,{"uwiger","gmhive_client",{0,6,3}}}.
-{deps,[{"uwiger","gmhive_worker",{0,5,1}},
+{desc,"Gajumaru Hive Client"}.
+{package_id,{"uwiger","gmhive_client",{0,10,0}}}.
+{deps,[{"uwiger","gmhive_protocol",{0,3,1}},
+       {"uwiger","gmhive_worker",{0,5,1}},
        {"uwiger","gmcuckoo",{1,2,4}},
        {"otpr","eblake2",{1,0,1}},
        {"otpr","base58",{0,1,1}},
        {"otpr","gmserialization",{0,1,3}},
-       {"uwiger","gmhive_protocol",{0,1,1}},
        {"uwiger","setup",{2,2,4}},
        {"uwiger","gproc",{1,0,1}},
        {"uwiger","gmconfig",{0,1,2}},