Commit 99a18163 authored by Jason Rhinelander's avatar Jason Rhinelander
Browse files

Added pool list code

parents
--
-- PostgreSQL database dump
--
-- Dumped from database version 10.3 (Debian 10.3-2)
-- Dumped by pg_dump version 10.3 (Debian 10.3-2)
-- Session settings emitted by pg_dump: disable timeouts and function-body
-- checking, pin the client encoding, and clear search_path so every object
-- reference below must be schema-qualified (a hardening measure in pg_dump
-- output since the CVE-2018-1058 fixes).
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET client_min_messages = warning;
SET row_security = off;
--
-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner:
--
CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
--
-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner:
--
COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
--
-- Name: pool_agg_stats(interval); Type: FUNCTION; Schema: public; Owner: graft
--
-- Per-pool statistics aggregated over the trailing `ago` interval (e.g.
-- '1 day'): mean hashrate, sample standard deviation of the hashrate, and
-- mean miner count, one row per pool.
--
-- Marked STABLE (was implicitly VOLATILE): the result depends only on table
-- contents and now() within a single statement, which lets the planner
-- optimize the three invocations in the pool_agg_stats materialized view.
CREATE FUNCTION public.pool_agg_stats(ago interval) RETURNS TABLE(pool integer, hashrate double precision, hashrate_sd double precision, miners double precision)
    LANGUAGE plpgsql STABLE
    AS $$
begin
    -- Columns are fully qualified: "time"/id live on pool_fetches, the metric
    -- columns on pool_stats.  The original unqualified names would become
    -- ambiguous if either table gained a similarly-named column.
    return query
        SELECT pool_stats.pool,
               avg(pool_stats.hashrate),
               stddev_samp(pool_stats.hashrate),
               avg(pool_stats.miners::float)
        FROM pool_stats
        JOIN pool_fetches ON pool_stats.pool_fetch = pool_fetches.id
        WHERE pool_fetches."time" > now() - ago
        GROUP BY pool_stats.pool;
end;
$$;
ALTER FUNCTION public.pool_agg_stats(ago interval) OWNER TO graft;
SET default_tablespace = '';
SET default_with_oids = false;
--
-- Name: pool_admin_pass; Type: TABLE; Schema: public; Owner: graft
--
-- Admin authentication: a request's auth_code is accepted if it equals any
-- row in this table.
-- NOTE(review): passwords are stored and compared in plain text — consider
-- storing salted hashes instead.
CREATE TABLE public.pool_admin_pass (
    password text NOT NULL
);
ALTER TABLE public.pool_admin_pass OWNER TO graft;
--
-- Name: pool_agg_stats; Type: MATERIALIZED VIEW; Schema: public; Owner: graft
--
-- One row per pool with 1-, 3-, and 7-day trailing averages, produced by
-- joining three calls of the pool_agg_stats(interval) function on pool id.
-- Created WITH NO DATA: it must be populated via REFRESH MATERIALIZED VIEW
-- before first use (the unique index on pool, defined below, also allows
-- REFRESH ... CONCURRENTLY).
CREATE MATERIALIZED VIEW public.pool_agg_stats AS
SELECT s1.pool,
    s1.hashrate AS hashrate_1d,
    s1.hashrate_sd AS hashrate_sd_1d,
    s1.miners AS miners_1d,
    s3.hashrate AS hashrate_3d,
    s3.hashrate_sd AS hashrate_sd_3d,
    s3.miners AS miners_3d,
    s7.hashrate AS hashrate_7d,
    s7.hashrate_sd AS hashrate_sd_7d,
    s7.miners AS miners_7d
FROM ((public.pool_agg_stats('1 day'::interval) s1(pool, hashrate, hashrate_sd, miners)
JOIN public.pool_agg_stats('3 days'::interval) s3(pool, hashrate, hashrate_sd, miners) ON ((s1.pool = s3.pool)))
JOIN public.pool_agg_stats('7 days'::interval) s7(pool, hashrate, hashrate_sd, miners) ON ((s1.pool = s7.pool)))
WITH NO DATA;
ALTER TABLE public.pool_agg_stats OWNER TO graft;
--
-- Name: pool_blocks; Type: TABLE; Schema: public; Owner: graft
--
-- Blocks found by each pool; queried by hash by the /find endpoint to
-- attribute a block to a pool.
CREATE TABLE public.pool_blocks (
    pool integer NOT NULL,
    height integer NOT NULL,
    hash text NOT NULL
);
ALTER TABLE public.pool_blocks OWNER TO graft;
--
-- Name: pool_fetches; Type: TABLE; Schema: public; Owner: graft
--
-- One row per scraping run across all pool APIs; pool_stats rows reference a
-- run via pool_fetch.  height/difficulty record the reference (presumably
-- local-node) chain state at fetch time; height is what pool heights are
-- compared against for desync detection.
CREATE TABLE public.pool_fetches (
    id bigint NOT NULL,
    "time" timestamp with time zone DEFAULT now(),
    height integer NOT NULL,
    difficulty bigint NOT NULL
);
ALTER TABLE public.pool_fetches OWNER TO graft;
--
-- Name: pool_fetches_id_seq; Type: SEQUENCE; Schema: public; Owner: graft
--
CREATE SEQUENCE public.pool_fetches_id_seq
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;
ALTER TABLE public.pool_fetches_id_seq OWNER TO graft;
--
-- Name: pool_fetches_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: graft
--
ALTER SEQUENCE public.pool_fetches_id_seq OWNED BY public.pool_fetches.id;
--
-- Name: pool_stats; Type: TABLE; Schema: public; Owner: graft
--
-- Per-pool snapshot captured during one fetch (composite key pool_fetch +
-- pool).  The metric columns are nullable so a failed scrape can store just
-- the `error` text.
CREATE TABLE public.pool_stats (
    pool_fetch bigint NOT NULL,
    pool integer NOT NULL,
    height integer,
    blocks_found integer,
    hashrate double precision,
    effort double precision,
    miners integer,
    miners_paid integer,
    payments integer,
    fee double precision,
    threshold double precision,
    error text
);
ALTER TABLE public.pool_stats OWNER TO graft;
--
-- Name: pool_hashrate_chart; Type: MATERIALIZED VIEW; Schema: public; Owner: graft
--
-- Average hashrate per pool per hour; serves the hr_chart series of the
-- /stats endpoint.  Created WITH NO DATA — needs an initial REFRESH
-- MATERIALIZED VIEW before it can be queried.
CREATE MATERIALIZED VIEW public.pool_hashrate_chart AS
SELECT pool_stats.pool,
    avg(pool_stats.hashrate) AS hashrate,
    date_trunc('hour'::text, pool_fetches."time") AS hour
FROM (public.pool_stats
JOIN public.pool_fetches ON ((pool_stats.pool_fetch = pool_fetches.id)))
GROUP BY pool_stats.pool, (date_trunc('hour'::text, pool_fetches."time"))
WITH NO DATA;
ALTER TABLE public.pool_hashrate_chart OWNER TO graft;
--
-- Name: pools; Type: TABLE; Schema: public; Owner: graft
--
-- Registry of tracked pools.  `api` selects the scraper/probe flavour (the
-- web app accepts 'ncp'/'ncp+1' and 'nodejs'/'nodejs+1'); pools with
-- enabled = false are kept but excluded from the /stats response.
CREATE TABLE public.pools (
    id integer NOT NULL,
    name text NOT NULL,
    url text NOT NULL,
    blocks_url text NOT NULL,
    api_url text NOT NULL,
    api text NOT NULL,
    enabled boolean DEFAULT true NOT NULL,
    location text
);
ALTER TABLE public.pools OWNER TO graft;
--
-- Name: pools_id_seq; Type: SEQUENCE; Schema: public; Owner: graft
--
CREATE SEQUENCE public.pools_id_seq
    AS integer
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;
ALTER TABLE public.pools_id_seq OWNER TO graft;
--
-- Name: pools_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: graft
--
ALTER SEQUENCE public.pools_id_seq OWNED BY public.pools.id;
--
-- Name: pool_fetches id; Type: DEFAULT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_fetches ALTER COLUMN id SET DEFAULT nextval('public.pool_fetches_id_seq'::regclass);
--
-- Name: pools id; Type: DEFAULT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pools ALTER COLUMN id SET DEFAULT nextval('public.pools_id_seq'::regclass);
--
-- Name: pool_admin_pass pool_admin_pass_pkey; Type: CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_admin_pass
    ADD CONSTRAINT pool_admin_pass_pkey PRIMARY KEY (password);
--
-- Name: pool_blocks pool_blocks_pkey; Type: CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_blocks
    ADD CONSTRAINT pool_blocks_pkey PRIMARY KEY (pool, height, hash);
--
-- Name: pool_fetches pool_fetches_pkey; Type: CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_fetches
    ADD CONSTRAINT pool_fetches_pkey PRIMARY KEY (id);
--
-- Name: pool_stats pool_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_stats
    ADD CONSTRAINT pool_stats_pkey PRIMARY KEY (pool_fetch, pool);
--
-- Name: pools pools_name_key; Type: CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pools
    ADD CONSTRAINT pools_name_key UNIQUE (name);
--
-- Name: pools pools_pkey; Type: CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pools
    ADD CONSTRAINT pools_pkey PRIMARY KEY (id);
--
-- Name: pool_agg_stats_pool_idx; Type: INDEX; Schema: public; Owner: graft
--
-- Unique index on the materialized view: required for
-- REFRESH MATERIALIZED VIEW CONCURRENTLY.
CREATE UNIQUE INDEX pool_agg_stats_pool_idx ON public.pool_agg_stats USING btree (pool);
--
-- Name: pool_blocks_hash_idx; Type: INDEX; Schema: public; Owner: graft
--
-- Supports the /find endpoint's WHERE hash IN (...) lookups.
CREATE INDEX pool_blocks_hash_idx ON public.pool_blocks USING btree (hash);
--
-- Name: pool_fetches_time_idx; Type: INDEX; Schema: public; Owner: graft
--
-- Supports the "latest fetch" query (ORDER BY time DESC LIMIT 1) and the
-- trailing-interval filter in pool_agg_stats().
CREATE INDEX pool_fetches_time_idx ON public.pool_fetches USING btree ("time" DESC);
--
-- Name: pool_blocks pool_blocks_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_blocks
    ADD CONSTRAINT pool_blocks_pool_fkey FOREIGN KEY (pool) REFERENCES public.pools(id);
--
-- Name: pool_stats pool_stats_pool_fetch_fkey; Type: FK CONSTRAINT; Schema: public; Owner: graft
--
-- CASCADE: deleting a fetch run removes its per-pool stats rows too.
ALTER TABLE ONLY public.pool_stats
    ADD CONSTRAINT pool_stats_pool_fetch_fkey FOREIGN KEY (pool_fetch) REFERENCES public.pool_fetches(id) ON DELETE CASCADE;
--
-- Name: pool_stats pool_stats_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: graft
--
ALTER TABLE ONLY public.pool_stats
    ADD CONSTRAINT pool_stats_pool_fkey FOREIGN KEY (pool) REFERENCES public.pools(id);
--
-- PostgreSQL database dump complete
--
#!/usr/bin/python3
# This script provides the pool API interface used by the web pages to display pool data. It is
# designed to be handled as a uwsgi script, for example via uwsgi-emperor, proxied by the web server
# to serve API requests.
# Configure this to set up the postgresql database access. For a properly configured local
# postgresql with a postgresql user corresponding to the user running this script you generally just
# need the 'dbname' here; for more complex setups see the list of parameters at:
# http://initd.org/psycopg/docs/module.html#psycopg2.connect
PG_CONNECT = { 'dbname': 'graftpools' }
# Standard library
import ipaddress
import json
import re
import socket
import urllib
import urllib.parse  # explicit: `import urllib` alone does not guarantee urllib.parse is loaded

# Third party
import psycopg2, psycopg2.extras
import requests
# Cached module-global connection; (re)created lazily by pgsql().
_pgsql = None
def pgsql():
    """Return a live PostgreSQL connection, reconnecting if the cached one died.

    The connection is cached in the module-global `_pgsql` and validated with a
    cheap `SELECT 1` round trip before reuse; on any failure the stale handle
    is closed (the original code leaked its socket here) and a fresh
    autocommit connection with DictCursor rows is opened.
    """
    global _pgsql
    if _pgsql is not None:
        try:
            _pgsql.cursor().execute("SELECT 1")
            return _pgsql
        except Exception:
            # Connection went away (server restart, idle timeout, ...); make
            # sure the dead handle releases its resources before reconnecting.
            try:
                _pgsql.close()
            except Exception:
                pass
            _pgsql = None
    _pgsql = psycopg2.connect(**PG_CONNECT, cursor_factory=psycopg2.extras.DictCursor)
    _pgsql.autocommit = True
    return _pgsql
def application(env, start_response):
    """uwsgi/WSGI entry point: parse the request parameters, dispatch on path.

    Query parameters (from the POST body or the query string) are normalized:
    single-valued parameters collapse to bare strings, and PHP-style "name[]"
    array parameters are renamed to "name" (keeping the list).  Unknown paths
    get a plain-text 404.
    """
    if env['REQUEST_METHOD'] == 'POST':
        # Tolerate a missing/empty CONTENT_LENGTH header instead of raising.
        size = int(env.get('CONTENT_LENGTH') or 0)
        if size > 0:
            q = urllib.parse.parse_qs(env['wsgi.input'].read(size).decode('utf-8'), keep_blank_values=True)
        else:
            q = {}
    else:
        q = urllib.parse.parse_qs(env['QUERY_STRING'], keep_blank_values=True)
    # Iterate over a snapshot of the keys: the original `for k in q` mutated
    # the dict (insert + del) while iterating, which raises
    # "RuntimeError: dictionary keys changed during iteration" on Python 3.8+
    # whenever a "name[]" parameter is present.
    for k in list(q):
        if len(q[k]) == 1:
            q[k] = q[k][0]
        if k.endswith('[]') and k[0:-2] not in q:
            q[k[0:-2]] = q[k]
            del q[k]
    if env['PATH_INFO'] == '/stats':
        return pool_stats(env, start_response, q)
    elif env['PATH_INFO'] == '/pools':
        return pool_list(env, start_response, q)
    elif env['PATH_INFO'] == '/find':
        return find_pool(env, start_response, q)
    elif env['PATH_INFO'] == '/add-pool':
        return add_pool(env, start_response, q)
    elif env['PATH_INFO'] == '/modify-pool':
        return modify_pool(env, start_response, q)
    elif env['PATH_INFO'] == '/detect-remote':
        return detect_remote(env, start_response, q)
    start_response('404 Not Found', [('Content-Type', 'text/plain')])
    return [b'Not Found']
def pool_stats(env, start_response, q):
    """/stats endpoint: JSON snapshot of the most recent fetch.

    The payload carries the latest pool_fetches row (with `time` converted to
    a unix timestamp), a `pools` list with per-pool stats and a `desync` flag
    for every enabled pool, aggregate synced/desynced hashrate totals, and an
    hourly per-pool hashrate chart (`hr_chart`) from the pool_hashrate_chart
    materialized view.
    """
    pg = pgsql()
    cur = pg.cursor()
    # The latest fetch supplies the reference height and the fetch id to key on.
    cur.execute("SELECT * FROM pool_fetches ORDER BY time DESC LIMIT 1")
    data = dict(cur.fetchone())
    data['time'] = data['time'].timestamp()
    cur.execute("""
SELECT id, name, url, blocks_url, location, height, blocks_found, hashrate, effort, miners, miners_paid, payments, fee, threshold, error,
hashrate_1d AS hr1, hashrate_7d AS hr7
FROM pools JOIN pool_stats ON pool = id JOIN pool_agg_stats ON pool_stats.pool = pool_agg_stats.pool
WHERE pool_fetch = %s AND enabled
""", (data['id'],))
    data['hashrate_synced'] = 0
    data['hashrate_desynced'] = 0
    data['pools'] = []
    # Maps pool id -> position in data['pools'] so chart columns line up below.
    pool_index = {}
    for pool in cur:
        p = dict(pool)
        pool_id = p['id']
        del p['id']
        pool_index[pool_id] = len(data['pools'])
        # Look for out-of-sync pools; this can happen in two ways: a pool could have desynched (or
        # not forked) and stalled, with height too low, or it could have desynched and run away,
        # with height too high. On the other hand, it's normal to see pools 1-2 blocks out of sync
        # (i.e. the API scraping could occur around the same time a block is discovered), so we
        # allow up to ±3 from the local node.
        if 'height' in p and p['height'] is not None:
            p['desync'] = abs(data['height'] - p['height']) > 3
        # Some pools (e.g. MoneroOcean) don't expose node height, so just always assume they are synced.
        elif p['error'] is None:
            p['desync'] = False
        else:
            p['desync'] = True
        if p['hashrate'] is not None:
            data['hashrate_desynced' if p['desync'] else 'hashrate_synced'] += p['hashrate']
        data['pools'].append(p)
    cur.execute("SELECT pool, hashrate, hour FROM pool_hashrate_chart ORDER BY hour")
    data['hr_chart'] = []
    last_hour = None
    for row in cur:
        pool_id, hr, hour = row[0:3]
        # Skip pools not in the enabled set collected above.
        if pool_id not in pool_index:
            continue
        # Rows arrive ordered by hour, so an hour change starts a new chart
        # bucket with one (initially null) slot per enabled pool.
        if hour != last_hour:
            data['hr_chart'].append({'hour': hour.timestamp(), 'kh': [None] * len(data['pools']) })
            last_hour = hour
        chart = data['hr_chart'][-1]
        kh = None
        if hr is not None:
            # Scale down by 1000 ("kh") and round to keep roughly three
            # significant digits.
            kh = hr / 1000.
            kh = round(kh, 0 if kh >= 100 else 1 if kh >= 10 else 2 if kh >= 1 else 3)
        chart['kh'][pool_index[pool_id]] = kh
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [json.dumps(data).encode('utf-8')]
def pool_list(env, start_response, q):
    """/pools endpoint: the full pools table as a JSON array, ordered
    case-insensitively by pool name."""
    cur = pgsql().cursor()
    cur.execute("SELECT * FROM pools ORDER BY UPPER(name)")
    pools = [dict(row) for row in cur]
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [json.dumps(pools).encode('utf-8')]
def find_pool(env, start_response, q):
    """/find endpoint: attribute block hashes to the pools that found them.

    Expects a 'block' parameter carrying one hash or a list of hashes; returns
    a JSON object mapping each requested hash to {pool, height, blocks_url},
    or to null when the hash is unknown.
    """
    blocks = {}
    find = q.get('block')
    # The normalizer in application() collapses a single-valued parameter to a
    # bare string; the original code then did tuple(find) / `for h in find`,
    # which iterated the hash *character by character*.  Re-wrap it in a list.
    if isinstance(find, str):
        find = [find]
    if find:
        pg = pgsql()
        cur = pg.cursor()
        cur.execute("SELECT name, blocks_url, height, hash FROM pool_blocks JOIN pools ON pool = id WHERE hash IN %s",
                    (tuple(find),))
        for row in cur:
            blocks[row['hash']] = { 'pool': row['name'], 'height': row['height'], 'blocks_url': row['blocks_url'] }
        # Any requested hash we didn't find gets an explicit null.
        for h in find:
            if h not in blocks:
                blocks[h] = None
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [json.dumps(blocks).encode('utf-8')]
def error(start_response, message):
    """Emit a JSON error payload ({'error': message}) with a 200 status."""
    body = json.dumps({ 'error': message }).encode('utf-8')
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [body]
def require_auth(env, start_response, q):
    """Checks auth_code input for a valid auth code. If valid, returns None. Otherwise sets up
    and returns a response for the caller to return."""
    authed = False
    if 'auth_code' in q:
        pg = pgsql()
        cur = pg.cursor()
        # Parameterized lookup against the admin password table.
        # NOTE(review): passwords are stored/compared in plain text; consider
        # salted hashes, and a constant-time comparison if that changes.
        cur.execute("SELECT COUNT(*) > 0 FROM pool_admin_pass WHERE password = %s", (q['auth_code'],))
        authed = cur.fetchone()[0]
    if not authed:
        return error(start_response, 'Invalid or missing authentication code')
    return None
def check_ncp(url):
    """Probe an ncp-style pool API and return its decoded /stats payload.

    Raises on HTTP errors/timeouts, or RuntimeError when the response lacks
    the expected 'config' and 'pool' keys.
    """
    resp = requests.get(url + '/stats', timeout=3)
    resp.raise_for_status()
    stats = resp.json()
    if 'config' not in stats or 'pool' not in stats:
        raise RuntimeError("Pool API doesn't look like a standard ncp-type pool")
    return stats
def check_nodejs(url):
    """Probe a nodejs-style pool API and return its decoded /pool/stats payload.

    Raises on HTTP errors/timeouts, or RuntimeError when the response lacks
    the expected 'pool_statistics' key.
    """
    resp = requests.get(url + '/pool/stats', timeout=3)
    resp.raise_for_status()
    stats = resp.json()
    if 'pool_statistics' not in stats:
        raise RuntimeError("Pool API doesn't look like a standard nodejs-type pool")
    return stats
def add_pool(env, start_response, q):
    """/add-pool admin endpoint: validate and insert a new pool row.

    Requires a valid auth_code plus name/url/blocks_url/api/api_url/location
    parameters.  The pool's API is probed (check_ncp/check_nodejs) before the
    insert so that obviously broken entries are rejected up front.  All
    responses, including failures, are JSON with a 200 status.
    """
    auth_failed = require_auth(env, start_response, q)
    if auth_failed:
        return auth_failed
    for x in ('name', 'url', 'blocks_url', 'api', 'api_url', 'location'):
        if x not in q:
            return error(start_response, "Required parameter '" + x + "' not specified")
    # Normalize: strip a single trailing slash so endpoint paths can be appended.
    if q['api_url'].endswith('/'):
        q['api_url'] = q['api_url'][0:-1]
    if not any(q['api_url'].startswith(x) for x in ('http://', 'https://')):
        return error(start_response, "API URL '" + q['api_url'] + "' doesn't look like a valid URL")
    try:
        if q['api'] in ('ncp', 'ncp+1'):
            check_ncp(q['api_url'])
        elif q['api'] in ('nodejs', 'nodejs+1'):
            check_nodejs(q['api_url'])
        else:
            return error(start_response, "Unknown/unsupported API type '" + q['api'] + "'")
    except Exception as e:
        return error(start_response, "API URL check failed: " + str(e))
    pg = pgsql()
    cur = pg.cursor()
    try:
        # Parameterized insert; enabled defaults to false unless explicitly truthy.
        cur.execute("INSERT INTO pools (name, url, blocks_url, api, api_url, enabled, location) VALUES (%s, %s, %s, %s, %s, %s, %s)",
                    (q['name'], q['url'], q['blocks_url'], q['api'], q['api_url'], bool('enabled' in q and q['enabled']), q['location']))
    except psycopg2.Error as e:
        return error(start_response, "Unable to insert pool: " + str(e))
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [json.dumps({ 'success': "Added pool '{}' to pools list".format(q['name']) }).encode('utf-8')]
def modify_pool(env, start_response, q):
    """/modify-pool admin endpoint: update selected columns of a pool row.

    Requires a valid auth_code and an 'id'; any of name/url/blocks_url/api/
    api_url/location/enabled may be supplied and only those columns are
    updated.  All responses, including failures, are JSON with a 200 status.
    """
    auth_failed = require_auth(env, start_response, q)
    if auth_failed:
        return auth_failed
    if 'id' not in q:
        return error(start_response, "Required parameter 'id' not specified")
    # Build the SET list from a fixed whitelist of column names (never from
    # client-supplied keys), keeping the dynamic SQL below injection-safe.
    update = []
    params = []
    for x in ('name', 'url', 'blocks_url', 'api', 'api_url', 'location'):
        if x in q:
            update.append(x)
            params.append(q[x])
    if 'enabled' in q:
        update.append('enabled')
        params.append(bool(q['enabled']))
    if not update:
        return error(start_response, "Nothing to modify!")
    if 'api_url' in q:
        if q['api_url'].endswith('/'):
            q['api_url'] = q['api_url'][0:-1]
        if not any(q['api_url'].startswith(x) for x in ('http://', 'https://')):
            return error(start_response, "API URL '" + q['api_url'] + "' doesn't look like a valid URL")
        # Only probe the new URL when the pool is being enabled and the API
        # type is known.  The original code accessed q['enabled'] and q['api']
        # unguarded here, raising KeyError (-> HTTP 500) whenever api_url was
        # modified without also passing those parameters.
        if q.get('enabled') and 'api' in q:
            try:
                if q['api'] in ('ncp', 'ncp+1'):
                    check_ncp(q['api_url'])
                elif q['api'] in ('nodejs', 'nodejs+1'):
                    check_nodejs(q['api_url'])
                else:
                    return error(start_response, "Unknown/unsupported API type '" + q['api'] + "'")
            except Exception as e:
                return error(start_response, "API URL check failed: {}".format(e))
    pg = pgsql()
    cur = pg.cursor()
    try:
        cur.execute("UPDATE pools SET " + ', '.join(x + ' = %s' for x in update) + ' WHERE id = %s', (*params, q['id']))
    except psycopg2.Error as e:
        # (Was "Unable to insert pool" — copy/paste from add_pool.)
        return error(start_response, "Unable to update pool: " + str(e))
    if not cur.rowcount:
        return error(start_response, "Pool with id '" + q['id'] + "' not found")
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [json.dumps({ 'success': "Updated pool '{}'".format(q['name'] if 'name' in q else q['id']) }).encode('utf-8')]
def detect_remote(env, start_response, q):
"""Takes a URL and tries to figure out whether it's a ncp or nodejs pool by looking for
config.js or globals.js, respectively, in the source code. For each, we follow the
config/globals.js and try to extract the API URL out of it, then make an API request to make
sure that it looks like the right type."""
auth_failed = require_auth(env, start_response, q)
if auth_failed:
return auth_failed
# Check to make sure the URL looks valid, and resolves to a public address: