Index: backend/pgsql.t
==================================================================
--- backend/pgsql.t
+++ backend/pgsql.t
@@ -1,9 +1,23 @@
 -- vim: ft=terra
 local pstring = lib.mem.ptr(int8)
 local binblob = lib.mem.ptr(uint8)
 
 local queries = {
+	server_setup_self = {
+		params = {rawstring,binblob,int64}, cmd = true, sql = [[
+			insert into parsav_servers (id, domain, key, parsav, knownsince)
+				values (0, $1::text, $2::bytea, true, $3::bigint)
+		]];
+	};
+
+	server_fetch_sid = {
+		params = {uint64}, sql = [[
+			select domain, key, knownsince, parsav from parsav_servers
+				where id = $1::bigint
+		]];
+	};
+
 	conf_get = {
 		params = {rawstring}, sql = [[
 			select value from parsav_config where key = $1::text limit 1
 		]];
@@ -1148,17 +1162,36 @@
 	conprep = [terra(src: &lib.store.source, mode: lib.store.prepmode.t): {}
 		var [con] = [&lib.pq.PGconn](src.handle)
 		if mode == lib.store.prepmode.full then [prep]
 		elseif mode == lib.store.prepmode.conf or mode == lib.store.prepmode.admin then
+			queries.server_setup_self.prep(con)
 			queries.conf_get.prep(con)
 			queries.conf_set.prep(con)
 			queries.conf_reset.prep(con)
 			if mode == lib.store.prepmode.admin then
+				queries.server_fetch_sid.prep(con)
+				queries.actor_fetch_uid.prep(con)
+				queries.actor_fetch_xid.prep(con)
+				queries.actor_enum.prep(con)
+				queries.actor_enum_local.prep(con)
+				queries.actor_stats.prep(con)
+				queries.actor_powers_fetch.prep(con)
+				queries.actor_save.prep(con)
+				queries.actor_create.prep(con)
+				queries.actor_purge_uid.prep(con)
 			end
 		else lib.bail('unsupported connection preparation mode') end
 	end];
+
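+	-- usage note (cf. mgtool.t below): this delegate is called during
+	-- `parsav db init` to record the instance itself as the local-server row
+	-- (id = 0) in parsav_servers, together with its freshly generated private
+	-- key; knownsince is stamped with the current time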
+	server_setup_self = [terra(
+		src: &lib.store.source,
+		domain: rawstring,
+		key: binblob
+	): {}
+		queries.server_setup_self.exec(src,domain,key,lib.osclock.time(nil))
+	end];
 
 	dbsetup = [terra(src: &lib.store.source): bool
 		var res = lib.pq.PQexec([&lib.pq.PGconn](src.handle), schema)
 		if lib.pq.PQresultStatus(res) == lib.pq.PGRES_COMMAND_OK then
 			lib.report('successfully instantiated schema in database')

Index: backend/schema/pgsql.sql
==================================================================
--- backend/schema/pgsql.sql
+++ backend/schema/pgsql.sql
@@ -1,9 +1,11 @@
 create table parsav_config (
 	key text primary key,
 	value text
 );
+comment on table parsav_config is
+'server-wide configuration variables. highly sensitive!';
 insert into parsav_config (key,value) values
 	('schema-version','1'),
 	('credential-store','managed');
 	-- ('bind',:'bind'),
 	-- ('domain',:'domain'),
@@ -19,10 +21,12 @@
 	domain text not null unique,
 	key bytea,
 	knownsince bigint,
 	parsav boolean -- whether to use parsav protocol extensions
 );
+comment on table parsav_servers is
+'all servers known to the parsav instance. the local server (including its private key) is stored in row (id = 0)';
 
 create table parsav_actors (
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	nym text,
 	handle text not null, -- nym [@handle@origin]
@@ -39,10 +43,12 @@
 	epithet text,
 	authtime bigint not null, -- cookies earlier than this timepoint will not be accepted
 	unique (handle,origin)
 );
+comment on table parsav_actors is
+'all users known to the instance across the fediverse; local users satisfy constraint (origin = 0)';
 
 create table parsav_rights (
 	key text,
 	actor bigint references parsav_actors(id) on delete cascade,
@@ -50,10 +56,12 @@
 	scope bigint, -- for future expansion
 	primary key (key,actor)
 );
 create index on parsav_rights (actor);
+comment on table parsav_rights is
+'a backward-compatible list of every non-default privilege or deprivilege granted to a local user';
 
 create table parsav_posts (
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	author bigint references parsav_actors(id) on delete cascade,
 	subject text,
@@ -83,42 +91,50 @@
 		on delete cascade, -- e.g. followed
 	kind smallint, -- e.g. follow, block, mute
 	primary key (relator, relatee, kind)
 );
+comment on table parsav_rels is
+'all relationships, positive and negative, between local users and other users; kind is a version-specific integer mapping to a type-of-relationship enum in store.t';
 
 create table parsav_acts (
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	kind text not null, -- like, rt, react, so on
 	time bigint not null,
 	actor bigint references parsav_actors(id) on delete cascade,
 	subject bigint, -- may be post or act, depending on kind
-	body text -- emoji, if react
+	body text -- emoji, if react; complaint, if report
 );
 create index on parsav_acts (subject);
 create index on parsav_acts (actor);
 create index on parsav_acts (time);
+comment on table parsav_acts is
+'every simple action taken on a tweet by an actor, including likes, rts, reacts, and reports';
 
 create table parsav_log ( -- accesses are tracked for security & sending delete acts
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	time bigint not null,
 	actor bigint references parsav_actors(id) on delete cascade,
 	post bigint not null
 );
+comment on table parsav_log is
+'a log of accesses from foreign servers, tracking which of them must be sent update & delete events for each post';
 
 create table parsav_artifacts (
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	birth bigint not null,
 	content bytea, -- if null, this is a "ban record" preventing content matching the hash from being re-uploaded
 	hash bytea unique not null, -- sha256 hash of content
 	-- it would be cool to use a computed column for this, but i don't want
 	-- to lock people into PG12 or drag in the pgcrypto extension just for this
-	mime text -- null if unknown, will be reported as x-octet-stream
+	mime text -- null if unknown, will be reported as application/octet-stream
 );
 create index on parsav_artifacts (mime);
+comment on table parsav_artifacts is
+'deduplicated media files uploaded by users';
 
 create table parsav_artifact_claims (
 	birth bigint not null,
 	uid bigint references parsav_actors(id) on delete cascade,
 	rid bigint references parsav_artifacts(id) on delete cascade,
@@ -127,10 +143,12 @@
 	unique (uid,rid)
 );
 create index on parsav_artifact_claims (uid);
 create index on parsav_artifact_claims (uid,folder);
+comment on table parsav_artifact_claims is
+'a list of users who have an ownership interest in each artifact (effectively an index of GC roots)';
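+-- illustration only, not executed by parsav: because claims act as GC roots,
+-- orphaned uploads could in principle be swept with something along the lines of
+--   delete from parsav_artifacts a
+--     where a.content is not null -- keep ban records, which have null content
+--       and not exists (select 1 from parsav_artifact_claims c where c.rid = a.id);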
 
 create table parsav_circles (
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	owner bigint not null references parsav_actors(id) on delete cascade,
 	name text not null,
@@ -145,10 +163,12 @@
 	origin bigint references parsav_servers(id) on delete cascade,
 	name text not null,
 	description text not null,
 	policy smallint not null
 );
+comment on table parsav_rooms is
+'an index of user-created chatrooms';
 
 create table parsav_room_members (
 	room bigint not null references parsav_rooms(id) on delete cascade,
 	member bigint not null references parsav_actors(id) on delete cascade,
 	rank smallint not null default 0,
@@ -167,10 +187,12 @@
 	issuer bigint references parsav_actors(id) on delete set null,
 	handle text, -- admin can lock invite to specific handle
 	rank smallint not null default 0,
 	quota integer not null default 1000
 );
+comment on table parsav_invites is
+'all active invitations and the level of authority they grant if accepted';
 
 create table parsav_sanctions (
 	id bigint primary key default (1+random()*(2^63-1))::bigint,
 	issuer bigint references parsav_actors(id) on delete set null,
 	scope bigint, -- can be null or room for local actions
@@ -182,18 +204,22 @@
 	context text, -- admin-only note
 	appeal text -- null if no appeal lodged
 );
 create index on parsav_sanctions (victim,scope);
 create index on parsav_sanctions (issuer);
+comment on table parsav_sanctions is
+'administrative actions taken against particular users, posts, rooms, or other entities';
 
 create table parsav_actor_conf_strs (
 	uid bigint not null references parsav_actors(id) on delete cascade,
 	key text not null,
 	value text not null,
 	unique (uid,key)
 );
 create table parsav_actor_conf_ints (
 	uid bigint not null references parsav_actors(id) on delete cascade,
 	key text not null,
 	value bigint not null,
 	unique (uid,key)
 );
+comment on table parsav_actor_conf_strs is 'per-user configuration settings (string properties)';
+comment on table parsav_actor_conf_ints is 'per-user configuration settings (integer and enumeration properties)';
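+-- illustration only: a single per-user setting is looked up with something like
+--   select value from parsav_actor_conf_strs where uid = $1 and key = $2;
+-- integer- and enum-valued properties go through parsav_actor_conf_ints in the same way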
 
 -- create a temporary managed auth table; we can delete this later
 -- if it ends up being replaced with a view
 %include pgsql-auth.sql%

Index: mem.t
==================================================================
--- mem.t
+++ mem.t
@@ -175,7 +175,69 @@
 			return self.storage.ptr + idx
 		else lib.bail('vector overrun!') end
 	end
 	return v
 end)
+
+struct m.pool {
+	-- implements growable memory pools. EVERY THREAD MUST HAVE ITS OWN
+	storage: &opaque
+	cursor: &opaque
+	sz: intptr
+}
+
+terra m.pool:cue(sz: intptr)
+	if self.storage == nil then
+		self.storage = m.heapa_raw(sz)
+		self.cursor = self.storage
+		self.sz = sz
+	else
+		if self.sz >= sz then return self end
+		var ofs = [&uint8](self.cursor) - [&uint8](self.storage)
+		self.storage = m.heapr_raw(self.storage, sz)
+		self.cursor = [&opaque]([&uint8](self.storage) + ofs)
+		self.sz = sz
+	end
+	return self
+end
+
+terra m.pool:init(sz: intptr)
+	self.storage = nil
+	self:cue(sz)
+	return self
+end
+
+terra m.pool:free()
+	m.heapf(self.storage)
+	self.storage = nil
+	self.cursor = nil
+	self.sz = 0
+end
+
+terra m.pool:clear()
+	self.cursor = self.storage
+	return self
+end
+
+terra m.pool:alloc_bytes(sz: intptr): &opaque
+	var space = self.sz - ([&uint8](self.cursor) - [&uint8](self.storage))
+	-- grow past the current capacity, not merely past the shortfall, so the
+	-- enlarged pool always has room for what is already allocated plus sz
+	if space < sz then self:cue(self.sz + sz + 256) end
+	var ptr = self.cursor
+	self.cursor = [&opaque]([&uint8](self.cursor) + sz)
+	return ptr
+end
+
+-- typed convenience wrapper: returns a &ty with room for sz elements
+m.pool.methods.alloc = macro(function(self,ty,sz)
+	return `[&ty](self:alloc_bytes(sizeof(ty) * sz))
+end)
+
+terra m.pool:frame() -- stack-style linear mgmt
+	return self.cursor
+end
+
+terra m.pool:reset(frame: &opaque)
+	self.cursor = frame
+	return self
+end
+
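+-- usage sketch (method names as defined above; sizes are arbitrary examples):
+--   var p: m.pool
+--   p:init(4096)                  -- reserve an initial region
+--   var buf = p:alloc(int8, 256)  -- bump allocation; nothing is freed individually
+--   var fr = p:frame()            -- mark a point for stack-style reuse
+--   var tmp = p:alloc_bytes(64)
+--   p:reset(fr)                   -- roll back to the mark, reclaiming tmp's space
+--   p:clear()                     -- or drop everything at once between uses
+--   p:free()                      -- finally release the backing storage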
 return m

Index: mgtool.t
==================================================================
--- mgtool.t
+++ mgtool.t
@@ -289,14 +289,23 @@
 	if lib.str.cmp(dbmode.arglist(0),'init') == 0 and dbmode.arglist.ct == 2 then
 		lib.report('initializing new database structure for domain ', dbmode.arglist(1))
 		dlg:tx_enter()
 		if dlg:dbsetup() then
 			srv:conprep(lib.store.prepmode.conf)
+
+			do var newkp = lib.crypt.genkp()
+				-- generate server privkey
+				var kbuf: uint8[lib.crypt.const.maxdersz]
+				var privsz = lib.crypt.der(false,&newkp, kbuf)
+				dlg:server_setup_self(dbmode.arglist(1), [lib.mem.ptr(uint8)] {
+					ptr = &kbuf[0], ct = privsz
+				})
+			end
+
 			dlg:conf_set('instance-name', dbmode.arglist(1))
 			dlg:conf_set('domain', dbmode.arglist(1))
 			do var sec: int8[65]
 				gensec(&sec[0])
-				dlg:conf_set('server-secret', &sec[0])
 				dlg:conf_set('server-secret', &sec[0])
 			end
 			lib.report('database setup complete; use mkroot to create an administrative user')
 		else lib.bail('initialization process interrupted') end
 		dlg:tx_complete()

ADDED mime.t
Index: mime.t
==================================================================
--- mime.t
+++ mime.t
@@ -0,0 +1,13 @@
+local knowntypes = {
+	['text/csrc'] = {
+		ext = 'c', lang = 'c';
+	};
+	['text/html'] = {
+		ext = 'html', lang = 'html';
+		unsafe = true;
+	};
+	['text/markdown'] = {
+		formatter = 'smackdown';
+		ext = 'md', doc = true;
+	};
+}

Index: parsav.md
==================================================================
--- parsav.md
+++ parsav.md
@@ -3,11 +3,11 @@
 **parsav** is a lightweight social media server written in [terra](https://terralang.org), intended to integrate to some degree with the fediverse. it is named for the [Ranuir](http://ʞ.cc/fic/spirals/ranuir) words *par* "speech, communication" and *sav* "unity, togetherness, solidarity".
 
 ## backends
 
 parsav is designed to be storage-agnostic, and can draw data from multiple backends at a time. backends can be enabled or disabled at compile time to avoid unnecessary dependencies.
 
-* postgresql
+* postgresql (backend `pgsql`)
 
 ## dependencies
 
 * runtime
 	* mongoose
@@ -23,13 +23,13 @@
 	* inkscape, for rendering out some of the UI graphics that can't be represented with standard svg
 	* cwebp (libwebp package), for transforming inkscape PNGs to webp
 	* sassc, for compiling the SCSS stylesheet into its final CSS
 
-all builds require terra, which, unfortunately, requires installing an older version of llvm, v9 at the latest (which i develop parsav under). with any luck, your distro will be clever enough to package terra and its dependencies properly (it's trivial on nix, tho you'll need to tweak the terra expression to select a more recent llvm package); Arch Linux is one of those distros which is not so clever, and whose (AUR) terra package is totally broken. due to these unfortunate circumstances, terra is distributed not just in source form, but also in the the form of LLVM IR.
+all builds require terra, which, unfortunately, requires installing an older version of llvm, v9 at the latest (which i develop parsav under). with any luck, your distro will be clever enough to package terra and its dependencies properly (it's trivial on nix, tho you'll need to tweak the terra expression to select a more recent llvm package if you want v9; this isn't necessary to successfully build parsav, however); Arch Linux is one of those distros which is not so clever, and whose (AUR) terra package is totally broken. due to these unfortunate circumstances, terra is distributed not just in source form, but also in the form of LLVM IR and x86-64 assembly + object code.
 
-i've noticed that terra (at least with llvm9) seems to get a bit cantankerous and trigger llvm to fail with bizarre errors when you try to cross-compile parsav from x86-64 to any other platform, even x86-32. i don't know if this problem exists on other architectures or in what form. as a workaround, i've tried generating LLVM IR (ostensibly for x86-64, though this is in reality an architecture-independent language), and then compiling that down to an object file with llc. it doesn't work. the generated binaries seem to run but they crash with bizarre errors and are impossible to debug, as llc refuses to include debug symbols. for these reasons, parsav will (almost certainly) not run on any architecture besides x86-64, at least until terra and/or llvm are fixed.
+i've noticed that terra (at least with llvm 6 and 9) seems to get a bit cantankerous and trigger llvm to fail with bizarre errors when you try to cross-compile parsav from x86-64 to any other platform, even x86-32. i don't know if this problem exists on other architectures or in what form. as a workaround, i've tried generating LLVM IR (putatively for x86-64, though it is ostensibly an architecture-independent language), and then compiling that down to an object file with llc. it doesn't work. the generated binaries seem to run but they crash with bizarre errors and are impossible to debug, as llc refuses to include debug symbols. for these reasons, parsav will (almost certainly) not run on any architecture besides x86-64, at least until terra and/or llvm are fixed. there is a very small possibility, however, that compiling natively on an ARM or x86-32 host might succeed. if you can pull it off, please let me know and i'll update the docs.
 
 also note that, while parsav has a flag to build with ASAN, ASAN has proven unusable for most purposes as it routinely reports false positive buffer-heap-overflows. if you figure out how to defuckulate this, i will be overjoyed.
 
 ## building
@@ -37,11 +37,13 @@
 
 postgresql-libs must be installed systemwide, as `parsav` does not currently provide for statically compiling and linking it
 
 if you use nixos and wish to build the pdf documentation, you're going to have to do a bit of extra work (but you're used to that, aren't you). for some incomprehensible reason, the groff package on nix is split up, seemingly randomly, with many crucial output devices relegated to the "perl" output of the package, which is not installed by default (and `nix-env -iA nixos.groff.perl` doesn't work either; i don't know why either). you'll have to instantiate and install the outputs directly by path, e.g. `nix-env -i /nix/store/*groff*/` to get everything you need into your profile. alas, the battle is not over: you also need to change the environment variables `GROFF_FONT_PATH` and `GROFF_TMAC_PATH` to point at the `font` and `tmac` subdirs of `~/.nix-profile/share/groff/$groff_version/`. once this is done, invoking `groff -Tpdf` will work as expected.
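+
+concretely, that boils down to something like the following (the store path glob and groff version directory will differ on your system):
+
+	nix-env -i /nix/store/*groff*/
+	export GROFF_FONT_PATH=~/.nix-profile/share/groff/$groff_version/font
+	export GROFF_TMAC_PATH=~/.nix-profile/share/groff/$groff_version/tmac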
 
-## configuring
+unfortunately, the produced daemon binary is rather large, weighing in at around 600K at the time of writing. you can reduce this significantly, however, by `strip`ping the binary, and reduce it further by compiling without debug functionality turned on (i.e. no debug symbols and no debug log level, both of which insert a large number of strings into the resulting object code).
+
+## configuration
 
 the `parsav` configuration is comprised of two components: the backends list and the config store. the backends list is a simple text file that tells `parsav` which data sources to draw from. the config store is a key-value store which contains the rest of the server's configuration, and is loaded from the backends. the configuration store can be spread across the backends; backends will be checked for configuration keys according to the order in which they are listed. changes to the configuration store affect parsav in real time; you only need to restart the server if you make a change to the backend list. you can directly modify the store from the command line with the `parsav conf` command; see `parsav conf -h` for more information.
@@ -48,11 +48,11 @@
 
 by default, parsav looks for a file called `backend.conf` in the current directory when it is launched. you can override this default with the `parsav_backend_file` environment variable or with the `-b`/`--backend-file` flag. `backend.conf` lists one backend per line, in the form `id type confstring`. for instance, if you had two postgresql databases, you might write a backend file like
 
 	master pgsql host=localhost dbname=parsav
 	tweets pgsql host=420.69.dread.cloud dbname=content
 
-the form the configuration string takes depends on the specific backend.
+the form the configuration string takes depends on the specific backend. for postgres, it's just the standard postgres connection string, and supports all the usual properties, as it's passed directly to the client library unmodified.
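+
+for example, a backend entry using a few of the common libpq connection keywords might look like this (host, credentials, and database name are placeholders):
+
+	master pgsql host=db.example.net port=5432 dbname=parsav user=parsav password=secret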
 
 once you've set up a backend and confirmed parsav can connect successfully to it, you can initialize the database with the command `parsav db init <domain>`, where `<domain>` is the domain name you will be hosting `parsav` from. this will install all necessary structures and functions in the target and create all necessary files. it will not, however, create any users. you can create an initial administrative user with the `parsav mkroot <handle>` command, where `<handle>` is the handle you want to use on the server. this will also assign a temporary password for the user if possible.
 
 you should now be able to log in and administer the server. if something goes awry with your administrative account, don't fret! you can get your powers themselves back with the command `parsav user grant all`, and if you're having difficulties logging in, the command `parsav user auth pw reset` will give you a fresh password. if all else fails, you can always run `mkroot` again to create a new root account, and try to repair the damage from there.
@@ -129,10 +131,12 @@
 * lmdb
 * sqlite3
 * generic odbc
 * lua
 * ldap for auth (and maybe actors?)
-* cdb (for static content, maybe?)
+* cdb (for static content, maybe? does this make sense?)
 * mariadb/mysql
 * the various nosql horrors, e.g. redis, mongo, and so on
 
 parsav urgently needs an internationalization framework as well. right now everything is just hardcoded in english. yuck.
+
+parsav could be significantly improved by adjusting its memory management strategy. instead of allocating everything with lib.mem.heapa (which currently maps to malloc on all platforms), we should allocate a static buffer for the server overlord object which can simply be cleared and re-used for each http request, and enlarged with `realloc` when necessary. the entire region could be `mlock`ed for better performance, and it would no longer be necessary to track and free memory, as the entire buffer would simply be discarded after use (similar to PHP's original memory management strategy). this would remove possibly the largest source of latency in the codebase, as `parsav` is regrettably quite heavy on malloc, performing numerous allocations for each page rendered.
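+
+a rough sketch of that pattern, reusing the `m.pool` arena from `mem.t` (the request-handling wiring shown here is hypothetical):
+
+	-- one pool per worker thread, sized once at startup
+	pool:init(64 * 1024)
+	-- ...render the request, drawing all scratch memory from the pool...
+	var line = pool:alloc(int8, 256)
+	-- once the response has been sent, recycle the whole region in one step
+	pool:clear()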
 
Index: render/nav.t
==================================================================
--- render/nav.t
+++ render/nav.t
@@ -5,12 +5,14 @@
 	if co.who ~= nil or co.srv.cfg.pol_sec == lib.srv.secmode.public then
 		t:lpush(' timeline')
 	end
 	if co.who ~= nil then
-		t:lpush(' compose profile media configure docs log out notices')
+		t:lpush('">profile media configure docs @')
+		t:push(co.who.handle,0)
+		t:lpush(' log out notices')
 	else
 		t:lpush(' docs log in')
 	end
 	return t:finalize()
 end
 return render_nav

Index: route.t
==================================================================
--- route.t
+++ route.t
@@ -362,10 +362,13 @@
 		if privs:sz() > 0 then
 			lib.dbg('installing credential restrictions')
 			lib.io.fmt('on priv %llu\n',aid)
 			co.srv:auth_privs_set(aid, privs)
 		end
+
+		lib.dbg('setting netmask restrictions')
+		var nm = co:pgetv('netmask')
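+		-- (nm is not consumed anywhere in this change; presumably groundwork for
+		-- actually applying the netmask restriction in a follow-up)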
 	end
 	co:reroute('?')
 	return
 end
 co:complain(400,'bad request','the operation you have requested is not meaningful in this context')

Index: static/style.scss
==================================================================
--- static/style.scss
+++ static/style.scss
@@ -212,10 +212,16 @@
 	all: unset;
 	display: flex;
 	justify-content: flex-end;
 	align-items: center;
 	grid-column: 2/3; grid-row: 1/2;
+	.ident {
+		color: tone(-20%);
+		margin-left: 0.2em;
+		border-left: 1px solid tone(-40%);
+		padding-left: 0.5em;
+	}
 	> a[href] {
 		display: block;
 		padding: 0.25in 0.10in;
 		//padding: calc((25% - 1em)/2) 0.15in;
 		&, &::after { transition: 0.3s; }
@@ -518,12 +524,15 @@
 	grid-template-rows: min-content max-content;
 	margin-bottom: 0.1in;
 	transition: 0.2s ease-out;
 	>.avatar {
 		grid-column: 1/2; grid-row: 1/2;
-		img { display: block; width: 1in; height: 1in; margin:0; }
 		background: linear-gradient(to bottom, tone(-53%), tone(-57%));
+		img {
+			display: block; width: 1in; height: 1in; margin:0;
+			border-right: 1px solid tone(-65%);
+		}
 	}
 	>a[href].username {
 		display: block;
 		grid-column: 1/3; grid-row: 2/3;

Index: store.t
==================================================================
--- store.t
+++ store.t
@@ -355,10 +355,12 @@
 	tx_complete: &m.source -> bool
 
 	-- these two functions are special, in that they should be called
 	-- directly on a specific backend, rather than passed down to the
 	-- backends by the server; that is pathological behavior that will
 	-- not have the desired effect
+
+	server_setup_self: {&m.source, rawstring, lib.mem.ptr(uint8)} -> {}
 	conf_get: {&m.source, rawstring} -> lib.mem.ptr(int8)
 	conf_set: {&m.source, rawstring, rawstring} -> {}
 	conf_reset: {&m.source, rawstring} -> {}