parsav  Check-in [8d35307a7f]

Overview
Comment: add memory pool impl, handle various little details, add beginnings of mimelib
SHA3-256: 8d35307a7f76cc6624c69b3f11b1203c747d45596358531cd93d699f4265c35a
User & Date: lexi on 2021-01-10 03:54:46
Context
2021-01-10 08:19  begin replacing inefficient memory management with a pool-based solution; fix memory leaks check-in: 7c8769bf96 user: lexi tags: trunk
2021-01-10 03:54  add memory pool impl, handle various little details, add beginnings of mimelib check-in: 8d35307a7f user: lexi tags: trunk
2021-01-09 07:15  user mgmt and rt improvements check-in: 05af79b909 user: lexi tags: trunk
Changes

Modified backend/pgsql.t from [175b022aff] to [794844a9c8].

from [175b022aff]:
-- vim: ft=terra
local pstring = lib.mem.ptr(int8)
local binblob = lib.mem.ptr(uint8)
local queries = {
	conf_get = {
		params = {rawstring}, sql = [[
			select value from parsav_config
				where key = $1::text limit 1
		]];
	};

................................................................................
	tx_enter = txdo, tx_complete = txdone;

	conprep = [terra(src: &lib.store.source, mode: lib.store.prepmode.t): {}
		var [con] = [&lib.pq.PGconn](src.handle)
		if mode == lib.store.prepmode.full then [prep]
		elseif mode == lib.store.prepmode.conf or
		       mode == lib.store.prepmode.admin then 
			queries.conf_get.prep(con)
			queries.conf_set.prep(con)
			queries.conf_reset.prep(con)
			if mode == lib.store.prepmode.admin then 
			end
		else lib.bail('unsupported connection preparation mode') end
	end];

	dbsetup = [terra(src: &lib.store.source): bool
		var res = lib.pq.PQexec([&lib.pq.PGconn](src.handle), schema)
		if lib.pq.PQresultStatus(res) == lib.pq.PGRES_COMMAND_OK then
			lib.report('successfully instantiated schema in database')
			return true
		else

to [794844a9c8]:
-- vim: ft=terra
local pstring = lib.mem.ptr(int8)
local binblob = lib.mem.ptr(uint8)
local queries = {
	server_setup_self = {
		params = {rawstring,binblob,int64}, cmd = true, sql = [[
			insert into parsav_servers (id, domain, key, parsav, knownsince)
				values (0, $1::text, $2::bytea, true, $3::bigint)
		]];
	};

	server_fetch_sid = {
		params = {uint64}, sql = [[
			select domain, key, knownsince, parsav from parsav_servers
				where id = $1::bigint
		]];
	};

	conf_get = {
		params = {rawstring}, sql = [[
			select value from parsav_config
				where key = $1::text limit 1
		]];
	};

................................................................................
	tx_enter = txdo, tx_complete = txdone;

	conprep = [terra(src: &lib.store.source, mode: lib.store.prepmode.t): {}
		var [con] = [&lib.pq.PGconn](src.handle)
		if mode == lib.store.prepmode.full then [prep]
		elseif mode == lib.store.prepmode.conf or
		       mode == lib.store.prepmode.admin then 
			queries.server_setup_self.prep(con)
			queries.conf_get.prep(con)
			queries.conf_set.prep(con)
			queries.conf_reset.prep(con)
			if mode == lib.store.prepmode.admin then 
				queries.server_fetch_sid.prep(con)
				queries.actor_fetch_uid.prep(con)
				queries.actor_fetch_xid.prep(con)
				queries.actor_enum.prep(con)
				queries.actor_enum_local.prep(con)
				queries.actor_stats.prep(con)
				queries.actor_powers_fetch.prep(con)
				queries.actor_save.prep(con)
				queries.actor_create.prep(con)
				queries.actor_purge_uid.prep(con)
			end
		else lib.bail('unsupported connection preparation mode') end
	end];

	server_setup_self = [terra(
		src: &lib.store.source,
		domain: rawstring,
		key: binblob
	): {}
		queries.server_setup_self.exec(src,domain,key,lib.osclock.time(nil))
	end];

	dbsetup = [terra(src: &lib.store.source): bool
		var res = lib.pq.PQexec([&lib.pq.PGconn](src.handle), schema)
		if lib.pq.PQresultStatus(res) == lib.pq.PGRES_COMMAND_OK then
			lib.report('successfully instantiated schema in database')
			return true
		else

Modified backend/schema/pgsql.sql from [29d1b18fbc] to [bc736c4c1d].

from [29d1b18fbc]:
create table parsav_config (
	key   text primary key,
	value text
);

insert into parsav_config (key,value) values ('schema-version','1'),
	('credential-store','managed');
--	('bind',:'bind'),
--	('domain',:'domain'),
--	('instance-name',:'inst'),
--	('policy-security',:'secmode'),
................................................................................
create table parsav_servers (
	id     bigint primary key default (1+random()*(2^63-1))::bigint,
	domain text not null unique,
	key    bytea,
	knownsince bigint,
	parsav boolean -- whether to use parsav protocol extensions
);

create table parsav_actors (
	id        bigint primary key default (1+random()*(2^63-1))::bigint,
	nym       text,
	handle    text not null, -- nym [@handle@origin] 
	origin    bigint references parsav_servers(id)
		on delete cascade, -- null origin = local actor
................................................................................
	invites   integer not null default 0,
	key       bytea, -- private if localactor; public if remote
	epithet   text,
	authtime  bigint not null, -- cookies earlier than this timepoint will not be accepted
	
	unique (handle,origin)
);

create table parsav_rights (
	key text,
	actor bigint references parsav_actors(id)
		on delete cascade,
	allow boolean not null,
	scope bigint, -- for future expansion

	primary key (key,actor)
);
create index on parsav_rights (actor);

create table parsav_posts (
	id         bigint primary key default (1+random()*(2^63-1))::bigint,
	author     bigint references parsav_actors(id) on delete cascade,
	subject    text,
	acl        text not null default 'all', -- just store the script raw 🤷
	body       text,
................................................................................
		on delete cascade, -- e.g. follower
	relatee bigint references parsav_actors(id)
		on delete cascade, -- e.g. followed
	kind    smallint, -- e.g. follow, block, mute

	primary key (relator, relatee, kind)
);

create table parsav_acts (
	id      bigint primary key default (1+random()*(2^63-1))::bigint,
	kind    text not null, -- like, rt, react, so on
	time    bigint not null,
	actor   bigint references parsav_actors(id) on delete cascade,
	subject bigint, -- may be post or act, depending on kind
	body	text -- emoji, if react
);
create index on parsav_acts (subject);
create index on parsav_acts (actor);
create index on parsav_acts (time);

create table parsav_log (
	-- accesses are tracked for security & sending delete acts
	id    bigint primary key default (1+random()*(2^63-1))::bigint,
	time  bigint not null,
	actor bigint references parsav_actors(id)
		on delete cascade,
	post  bigint not null
);

create table parsav_artifacts (
	id          bigint primary key default (1+random()*(2^63-1))::bigint,
	birth       bigint not null,
	content     bytea, -- if null, this is a "ban record" preventing content matching the hash from being re-uploaded
	hash		bytea unique not null, -- sha256 hash of content
	-- it would be cool to use a computed column for this, but i don't want
	-- to lock people into PG12 or drag in the pgcrypto extension just for this
	mime        text -- null if unknown, will be reported as x-octet-stream
);
create index on parsav_artifacts (mime);

create table parsav_artifact_claims (
	birth bigint not null,
	uid bigint references parsav_actors(id) on delete cascade,
	rid bigint references parsav_artifacts(id) on delete cascade,
	description text,
	folder text,

	unique (uid,rid)
);
create index on parsav_artifact_claims (uid);
create index on parsav_artifact_claims (uid,folder);

create table parsav_circles (
	id          bigint primary key default (1+random()*(2^63-1))::bigint,
	owner       bigint not null references parsav_actors(id) on delete cascade,
	name        text not null,
	members     bigint[] not null default array[]::bigint[],

................................................................................
create table parsav_rooms (
	id          bigint primary key default (1+random()*(2^63-1))::bigint,
	origin		bigint references parsav_servers(id) on delete cascade,
	name		text not null,
	description text not null,
	policy      smallint not null
);

create table parsav_room_members (
	room   bigint not null references parsav_rooms(id) on delete cascade,
	member bigint not null references parsav_actors(id) on delete cascade,
	rank   smallint not null default 0,
	admin  boolean not null default false, -- non-admins with rank can only moderate + invite
	title  text, -- admin-granted title like reddit flair
................................................................................
	-- ID becomes the user ID. privileges granted on the invite ID during the invite
	-- process are thus inherited by the user
	issuer bigint references parsav_actors(id) on delete set null,
	handle text, -- admin can lock invite to specific handle
	rank   smallint not null default 0,
	quota  integer not null  default 1000
);

create table parsav_sanctions (
	id     bigint primary key default (1+random()*(2^63-1))::bigint,
	issuer bigint references parsav_actors(id) on delete set null,
	scope  bigint, -- can be null or room for local actions
	nature smallint not null, -- silence, suspend, disemvowel, censor, noreply, etc
	victim bigint not null, -- can be user, room, or post
................................................................................
	review bigint,  -- brings up for review at given time if set
	reason text, -- visible to victim if set
	context text, -- admin-only note
	appeal text -- null if no appeal lodged
);
create index on parsav_sanctions (victim,scope);
create index on parsav_sanctions (issuer);

create table parsav_actor_conf_strs (
	uid bigint not null references parsav_actors(id) on delete cascade,
	key text not null, value text not null, unique (uid,key)
);
create table parsav_actor_conf_ints (
	uid bigint not null references parsav_actors(id) on delete cascade,
	key text not null, value bigint not null, unique (uid,key)
);

-- create a temporary managed auth table; we can delete this later
-- if it ends up being replaced with a view
%include pgsql-auth.sql%

to [bc736c4c1d]:
create table parsav_config (
	key   text primary key,
	value text
);
comment on table parsav_config is
'server-wide configuration variables. highly sensitive!';

insert into parsav_config (key,value) values ('schema-version','1'),
	('credential-store','managed');
--	('bind',:'bind'),
--	('domain',:'domain'),
--	('instance-name',:'inst'),
--	('policy-security',:'secmode'),
................................................................................
create table parsav_servers (
	id     bigint primary key default (1+random()*(2^63-1))::bigint,
	domain text not null unique,
	key    bytea,
	knownsince bigint,
	parsav boolean -- whether to use parsav protocol extensions
);
comment on table parsav_servers is
'all servers known to the parsav instance. the local server (including its private key) is stored in row (id = 0)';

create table parsav_actors (
	id        bigint primary key default (1+random()*(2^63-1))::bigint,
	nym       text,
	handle    text not null, -- nym [@handle@origin] 
	origin    bigint references parsav_servers(id)
		on delete cascade, -- null origin = local actor
................................................................................
	invites   integer not null default 0,
	key       bytea, -- private if localactor; public if remote
	epithet   text,
	authtime  bigint not null, -- cookies earlier than this timepoint will not be accepted
	
	unique (handle,origin)
);
comment on table parsav_actors is
'all users known to the instance across the fediverse; local users satisfy constraint (origin = 0)';

create table parsav_rights (
	key text,
	actor bigint references parsav_actors(id)
		on delete cascade,
	allow boolean not null,
	scope bigint, -- for future expansion

	primary key (key,actor)
);
create index on parsav_rights (actor);
comment on table parsav_rights is
'a backward-compatible list of every non-default privilege or deprivilege granted to a local user';

create table parsav_posts (
	id         bigint primary key default (1+random()*(2^63-1))::bigint,
	author     bigint references parsav_actors(id) on delete cascade,
	subject    text,
	acl        text not null default 'all', -- just store the script raw 🤷
	body       text,
................................................................................
		on delete cascade, -- e.g. follower
	relatee bigint references parsav_actors(id)
		on delete cascade, -- e.g. followed
	kind    smallint, -- e.g. follow, block, mute

	primary key (relator, relatee, kind)
);
comment on table parsav_rels is
'all relationships, positive and negative, between local users and other users; kind is a version-specific integer mapping to a type-of-relationship enum in store.t';

create table parsav_acts (
	id      bigint primary key default (1+random()*(2^63-1))::bigint,
	kind    text not null, -- like, rt, react, so on
	time    bigint not null,
	actor   bigint references parsav_actors(id) on delete cascade,
	subject bigint, -- may be post or act, depending on kind
	body	text -- emoji, if react; complaint, if report
);
create index on parsav_acts (subject);
create index on parsav_acts (actor);
create index on parsav_acts (time);
comment on table parsav_acts is
'every simple action taken on a tweet by an actor, including likes, rts, reacts, and reports';

create table parsav_log (
	-- accesses are tracked for security & sending delete acts
	id    bigint primary key default (1+random()*(2^63-1))::bigint,
	time  bigint not null,
	actor bigint references parsav_actors(id)
		on delete cascade,
	post  bigint not null
);
comment on table parsav_log is
'a log of accesses from foreign servers, tracking which will be sent update & delete events for each post';

create table parsav_artifacts (
	id          bigint primary key default (1+random()*(2^63-1))::bigint,
	birth       bigint not null,
	content     bytea, -- if null, this is a "ban record" preventing content matching the hash from being re-uploaded
	hash		bytea unique not null, -- sha256 hash of content
	-- it would be cool to use a computed column for this, but i don't want
	-- to lock people into PG12 or drag in the pgcrypto extension just for this
	mime        text -- null if unknown, will be reported as octet-stream
);
create index on parsav_artifacts (mime);
comment on table parsav_artifacts is
'deduplicated media files uploaded by users';

create table parsav_artifact_claims (
	birth bigint not null,
	uid bigint references parsav_actors(id) on delete cascade,
	rid bigint references parsav_artifacts(id) on delete cascade,
	description text,
	folder text,

	unique (uid,rid)
);
create index on parsav_artifact_claims (uid);
create index on parsav_artifact_claims (uid,folder);
comment on table parsav_artifact_claims is
'a list of users who have an ownership interest in each artifact (effectively an index of GC roots)';

create table parsav_circles (
	id          bigint primary key default (1+random()*(2^63-1))::bigint,
	owner       bigint not null references parsav_actors(id) on delete cascade,
	name        text not null,
	members     bigint[] not null default array[]::bigint[],

................................................................................
create table parsav_rooms (
	id          bigint primary key default (1+random()*(2^63-1))::bigint,
	origin		bigint references parsav_servers(id) on delete cascade,
	name		text not null,
	description text not null,
	policy      smallint not null
);
comment on table parsav_rooms is
'an index of user-created chatrooms';

create table parsav_room_members (
	room   bigint not null references parsav_rooms(id) on delete cascade,
	member bigint not null references parsav_actors(id) on delete cascade,
	rank   smallint not null default 0,
	admin  boolean not null default false, -- non-admins with rank can only moderate + invite
	title  text, -- admin-granted title like reddit flair
................................................................................
	-- ID becomes the user ID. privileges granted on the invite ID during the invite
	-- process are thus inherited by the user
	issuer bigint references parsav_actors(id) on delete set null,
	handle text, -- admin can lock invite to specific handle
	rank   smallint not null default 0,
	quota  integer not null  default 1000
);
comment on table parsav_invites is
'all active invitations and the level of authority they grant if accepted';

create table parsav_sanctions (
	id     bigint primary key default (1+random()*(2^63-1))::bigint,
	issuer bigint references parsav_actors(id) on delete set null,
	scope  bigint, -- can be null or room for local actions
	nature smallint not null, -- silence, suspend, disemvowel, censor, noreply, etc
	victim bigint not null, -- can be user, room, or post
................................................................................
	review bigint,  -- brings up for review at given time if set
	reason text, -- visible to victim if set
	context text, -- admin-only note
	appeal text -- null if no appeal lodged
);
create index on parsav_sanctions (victim,scope);
create index on parsav_sanctions (issuer);
comment on table parsav_sanctions is
'administrative actions taken against particular users, posts, rooms, or other entities';

create table parsav_actor_conf_strs (
	uid bigint not null references parsav_actors(id) on delete cascade,
	key text not null, value text not null, unique (uid,key)
);
create table parsav_actor_conf_ints (
	uid bigint not null references parsav_actors(id) on delete cascade,
	key text not null, value bigint not null, unique (uid,key)
);
comment on table parsav_actor_conf_strs is 'per-user configuration settings (string properties)';
comment on table parsav_actor_conf_ints is 'per-user configuration settings (integer and enumeration properties)';

-- create a temporary managed auth table; we can delete this later
-- if it ends up being replaced with a view
%include pgsql-auth.sql%

Modified mem.t from [a0c3213659] to [de24aef2de].

from [a0c3213659]:
	v.metamethods.__apply = terra(self: &v, idx: intptr): &ty -- no index??
		if self.sz > idx then
			return self.storage.ptr + idx
		else lib.bail('vector overrun!') end
	end
	return v 
end)

return m

to [de24aef2de]:
	v.metamethods.__apply = terra(self: &v, idx: intptr): &ty -- no index??
		if self.sz > idx then
			return self.storage.ptr + idx
		else lib.bail('vector overrun!') end
	end
	return v 
end)

struct m.pool {
 -- implements growable memory pools. EVERY THREAD MUST HAVE ITS OWN
	storage: &opaque
	cursor: &opaque
	sz: intptr
}

terra m.pool:cue(sz: intptr)
	if self.storage == nil then
		self.storage = m.heapa_raw(sz)
		self.cursor = self.storage
		self.sz = sz
	else
		if self.sz >= sz then return self end
		var ofs = [&uint8](self.cursor) - [&uint8](self.storage)
		self.storage = m.heapr_raw(self.storage, sz)
		self.cursor = [&opaque]([&uint8](self.storage) + ofs)
		self.sz = sz
	end
	return self
end

terra m.pool:init(sz: intptr)
	self.storage = nil
	self:cue(sz)
	return self
end

terra m.pool:free()
	m.heapf(self.storage)
	self.storage = nil
	self.cursor = nil
	self.sz = 0
end

terra m.pool:clear()
	self.cursor = self.storage
	return self
end

terra m.pool:alloc_bytes(sz: intptr): &opaque
	var space = self.sz - ([&uint8](self.cursor) - [&uint8](self.storage))
	if space < sz then self:cue(space + sz + 256) end
	var ptr = self.cursor
	self.cursor = [&opaque]([&uint8](self.cursor) + sz)
	return ptr
end

m.pool.methods.alloc = macro(function(self,ty,sz)
	return `[ty](self:alloc_bytes(sizeof(ty) * sz))
end)

terra m.pool:frame() -- stack-style linear mgmt
	return self.cursor
end

terra m.pool:reset(frame: &opaque)
	self.cursor = frame
	return self
end


return m
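
A rough usage sketch of the pool API added above (illustrative only; the sizes are arbitrary, and lib.mem is how this module's table m is reached from the rest of the tree):

local terra pool_demo()
	var p: lib.mem.pool
	p:init(4096)                            -- grab one growable backing block up front
	var buf = [&int8](p:alloc_bytes(256))   -- bump the cursor by 256 bytes
	var big = [&uint8](p:alloc_bytes(8192)) -- larger than what's left: cue() regrows the block
	p:clear()                               -- rewind the cursor; the block itself is kept
	p:free()                                -- release the backing block
end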

Modified mgtool.t from [b1a646e421] to [b40be7a821].

from [b1a646e421]:

			srv:setup(cnf) 
			if lib.str.cmp(dbmode.arglist(0),'init') == 0 and dbmode.arglist.ct == 2 then
				lib.report('initializing new database structure for domain ', dbmode.arglist(1))
				dlg:tx_enter()
				if dlg:dbsetup() then
					srv:conprep(lib.store.prepmode.conf)
					dlg:conf_set('instance-name', dbmode.arglist(1))
					dlg:conf_set('domain', dbmode.arglist(1))
					do var sec: int8[65] gensec(&sec[0])
						dlg:conf_set('server-secret', &sec[0])
						dlg:conf_set('server-secret', &sec[0])
					end
					lib.report('database setup complete; use mkroot to create an administrative user')
				else lib.bail('initialization process interrupted') end
				dlg:tx_complete()
			elseif lib.str.cmp(dbmode.arglist(0),'obliterate') == 0 then
				var cfmstr: int8[64] gen_cfstr(&cfmstr[0],0)
to [b40be7a821]:

			srv:setup(cnf) 
			if lib.str.cmp(dbmode.arglist(0),'init') == 0 and dbmode.arglist.ct == 2 then
				lib.report('initializing new database structure for domain ', dbmode.arglist(1))
				dlg:tx_enter()
				if dlg:dbsetup() then
					srv:conprep(lib.store.prepmode.conf)

					do var newkp = lib.crypt.genkp()
					 -- generate server privkey
						var kbuf: uint8[lib.crypt.const.maxdersz]
						var privsz = lib.crypt.der(false,&newkp, kbuf)
						dlg:server_setup_self(dbmode.arglist(1), [lib.mem.ptr(uint8)] {
							ptr = &kbuf[0], ct = privsz
						})
					end

					dlg:conf_set('instance-name', dbmode.arglist(1))
					dlg:conf_set('domain', dbmode.arglist(1))
					do var sec: int8[65] gensec(&sec[0])
						dlg:conf_set('server-secret', &sec[0])
					end
					lib.report('database setup complete; use mkroot to create an administrative user')
				else lib.bail('initialization process interrupted') end
				dlg:tx_complete()
			elseif lib.str.cmp(dbmode.arglist(0),'obliterate') == 0 then
				var cfmstr: int8[64] gen_cfstr(&cfmstr[0],0)

Added mime.t version [2e40a434e4].

local knowntypes = {
	['text/csrc'] = {
		ext = 'c', lang = 'c';
	};
	['text/html'] = {
		ext = 'html', lang = 'html';
		unsafe = true;
	};
	['text/markdown'] = {
		formatter = 'smackdown';
		ext = 'md', doc = true;
	};
}
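
A hypothetical lookup against the table above, purely to illustrate how the fields might be consumed (no such helper exists in this check-in, and the field semantics are inferred from their names):

local function mime_lookup(ty)
	local rec = knowntypes[ty]
	if rec == nil then return nil end
	-- extension, a rendering hint, and whether the type needs special handling
	return rec.ext, rec.formatter or rec.lang, rec.unsafe == true
end
-- mime_lookup('text/markdown') --> 'md', 'smackdown', false
-- mime_lookup('text/html')     --> 'html', 'html', true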

Modified parsav.md from [52b33381db] to [1e18c01a8c].

from [52b33381db]:
# parsav

**parsav** is a lightweight social media server written in [terra](https://terralang.org), intended to integrate to some degree with the fediverse. it is named for the [Ranuir](http://ʞ.cc/fic/spirals/ranuir) words *par* "speech, communication" and *sav* "unity, togetherness, solidarity".

## backends
parsav is designed to be storage-agnostic, and can draw data from multiple backends at a time. backends can be enabled or disabled at compile time to avoid unnecessary dependencies.

* postgresql

## dependencies

* runtime
  * mongoose
  * json-c
  * mbedtls
................................................................................

additional preconfigure dependencies are necessary if you are building directly from trunk, rather than from a release tarball that includes certain build artifacts which need to be embedded in the binary:

* inkscape, for rendering out some of the UI graphics that can't be represented with standard svg
* cwebp (libwebp package), for transforming inkscape PNGs to webp
* sassc, for compiling the SCSS stylesheet into its final CSS

all builds require terra, which, unfortunately, requires installing an older version of llvm, v9 at the latest (which i develop parsav under). with any luck, your distro will be clever enough to package terra and its dependencies properly (it's trivial on nix, tho you'll need to tweak the terra expression to select a more recent llvm package); Arch Linux is one of those distros which is not so clever, and whose (AUR) terra package is totally broken. due to these unfortunate circumstances, terra is distributed not just in source form, but also in the form of LLVM IR.

i've noticed that terra (at least with llvm9) seems to get a bit cantankerous and trigger llvm to fail with bizarre errors when you try to cross-compile parsav from x86-64 to any other platform, even x86-32. i don't know if this problem exists on other architectures or in what form. as a workaround, i've tried generating LLVM IR (ostensibly for x86-64, though this is in reality an architecture-independent language), and then compiling that down to an object file with llc. it doesn't work. the generated binaries seem to run but they crash with bizarre errors and are impossible to debug, as llc refuses to include debug symbols. for these reasons, parsav will (almost certainly) not run on any architecture besides x86-64, at least until terra and/or llvm are fixed.

also note that, while parsav has a flag to build with ASAN, ASAN has proven unusable for most purposes as it routinely reports false positive buffer-heap-overflows. if you figure out how to defuckulate this, i will be overjoyed.

## building

first, either install any missing dependencies as shared libraries, or build them as static libraries with the command `make dep.$LIBRARY`. as a shortcut, `make dep` will build all dependencies as static libraries. note that if the build system finds a static version of a library in the `lib/` folder, it will use that instead of any system library. note that these commands require GNU make (it may be installed as `gmake` on your system), although this is a fairly soft dependency -- if you really need to build it on BSD make, you can probably translate it with a minute or so of work; you'll just have to do some of the various gmake functions' work manually. this may be worthwhile if you're packaging for a BSD.

postgresql-libs must be installed systemwide, as `parsav` does not currently provide for statically compiling and linking it

if you use nixos and wish to build the pdf documentation, you're going to have to do a bit of extra work (but you're used to that, aren't you). for some incomprehensible reason, the groff package on nix is split up, seemingly randomly, with many crucial output devices relegated to the "perl" output of the package, which is not installed by default (and `nix-env -iA nixos.groff.perl` doesn't work either; i don't know why either). you'll have to instantiate and install the outputs directly by path, e.g. `nix-env -i /nix/store/*groff*/` to get everything you need into your profile. alas, the battle is not over: you also need to change the environment variables `GROFF_FONT_PATH` and `GROFF_TMAC_PATH` to point at the `font` and `tmac` subdirs of `~/.nix-profile/share/groff/$groff_version/`. once this is done, invoking `groff -Tpdf` will work as expected.

## configuring

the `parsav` configuration is comprised of two components: the backends list and the config store. the backends list is a simple text file that tells `parsav` which data sources to draw from. the config store is a key-value store which contains the rest of the server's configuration, and is loaded from the backends. the configuration store can be spread across the backends; backends will be checked for configuration keys according to the order in which they are listed. changes to the configuration store affect parsav in real time; you only need to restart the server if you make a change to the backend list.

you can directly modify the store from the command line with the `parsav conf` command; see `parsav conf -h` for more information.

by default, parsav looks for a file called `backend.conf` in the current directory when it is launched. you can override this default with the `parsav_backend_file` environment variable or with the `-b`/`--backend-file` flag. `backend.conf` lists one backend per line, in the form `id type confstring`. for instance, if you had two postgresql databases, you might write a backend file like

    master   pgsql   host=localhost dbname=parsav
	tweets   pgsql   host=420.69.dread.cloud dbname=content

the form the configuration string takes depends on the specific backend.

once you've set up a backend and confirmed parsav can connect successfully to it, you can initialize the database with the command `parsav db init <domain>`, where `<domain>` is the domain name you will be hosting `parsav` from. this will install all necessary structures and functions in the target and create all necessary files. it will not, however, create any users. you can create an initial administrative user with the `parsav mkroot <handle>` command, where `<handle>` is the handle you want to use on the server. this will also assign a temporary password for the user if possible. you should now be able to log in and administer the server.

if something goes awry with your administrative account, don't fret! you can get your powers back with the command `parsav user <handle> grant all`, and if you're having difficulties logging in, the command `parsav user <handle> auth pw reset` will give you a fresh password. if all else fails, you can always run `mkroot` again to create a new root account, and try to repair the damage from there.

by default, parsav binds to [::1]:10917. if you want to change this (to run it on a different port, or make it directly accessible to other servers on the network), you can use the command `parsav conf set bind <address>`, where `address` is a binding specification like `0.0.0.0:80`. it is recommended, however, that `parsavd` be kept accessible only from localhost, and that connections be forwarded to it from nginx, haproxy, or a similar reverse proxy. (this can also be changed with the online configuration UI)

................................................................................

* plain text/filesystem storage
* lmdb
* sqlite3
* generic odbc
* lua
* ldap for auth (and maybe actors?)
* cdb (for static content, maybe?)
* mariadb/mysql
* the various nosql horrors, e.g. redis, mongo, and so on

parsav urgently needs an internationalization framework as well. right now everything is just hardcoded in english. yuck.

to [1e18c01a8c]:
# parsav

**parsav** is a lightweight social media server written in [terra](https://terralang.org), intended to integrate to some degree with the fediverse. it is named for the [Ranuir](http://ʞ.cc/fic/spirals/ranuir) words *par* "speech, communication" and *sav* "unity, togetherness, solidarity".

## backends
parsav is designed to be storage-agnostic, and can draw data from multiple backends at a time. backends can be enabled or disabled at compile time to avoid unnecessary dependencies.

* postgresql (backend `pgsql`)

## dependencies

* runtime
  * mongoose
  * json-c
  * mbedtls
................................................................................

additional preconfigure dependencies are necessary if you are building directly from trunk, rather than from a release tarball that includes certain build artifacts which need to be embedded in the binary:

* inkscape, for rendering out some of the UI graphics that can't be represented with standard svg
* cwebp (libwebp package), for transforming inkscape PNGs to webp
* sassc, for compiling the SCSS stylesheet into its final CSS

all builds require terra, which, unfortunately, requires installing an older version of llvm, v9 at the latest (which i develop parsav under). with any luck, your distro will be clever enough to package terra and its dependencies properly (it's trivial on nix, tho you'll need to tweak the terra expression to select a more recent llvm package if you want v9; this isn't necessary to successfully build parsav however); Arch Linux is one of those distros which is not so clever, and whose (AUR) terra package is totally broken. due to these unfortunate circumstances, terra is distributed not just in source form, but also in the form of LLVM IR and x86-64 assembly + object code.

i've noticed that terra (at least with llvm 6 and 9) seems to get a bit cantankerous and trigger llvm to fail with bizarre errors when you try to cross-compile parsav from x86-64 to any other platform, even x86-32. i don't know if this problem exists on other architectures or in what form. as a workaround, i've tried generating LLVM IR (putatively for x86-64, though this is an ostensibly architecture-independent language), and then compiling that down to an object file with llc. it doesn't work. the generated binaries seem to run but they crash with bizarre errors and are impossible to debug, as llc refuses to include debug symbols. for these reasons, parsav will (almost certainly) not run on any architecture besides x86-64, at least until terra and/or llvm are fixed. there is a very small possibility however that compiling natively on an ARM or x86-32 host might succeed. if you can pull it off, please let me know and i'll update the docs.

also note that, while parsav has a flag to build with ASAN, ASAN has proven unusable for most purposes as it routinely reports false positive buffer-heap-overflows. if you figure out how to defuckulate this, i will be overjoyed.

## building

first, either install any missing dependencies as shared libraries, or build them as static libraries with the command `make dep.$LIBRARY`. as a shortcut, `make dep` will build all dependencies as static libraries. note that if the build system finds a static version of a library in the `lib/` folder, it will use that instead of any system library. note that these commands require GNU make (it may be installed as `gmake` on your system), although this is a fairly soft dependency -- if you really need to build it on BSD make, you can probably translate it with a minute or so of work; you'll just have to do some of the various gmake functions' work manually. this may be worthwhile if you're packaging for a BSD.

postgresql-libs must be installed systemwide, as `parsav` does not currently provide for statically compiling and linking it

if you use nixos and wish to build the pdf documentation, you're going to have to do a bit of extra work (but you're used to that, aren't you). for some incomprehensible reason, the groff package on nix is split up, seemingly randomly, with many crucial output devices relegated to the "perl" output of the package, which is not installed by default (and `nix-env -iA nixos.groff.perl` doesn't work either; i don't know why either). you'll have to instantiate and install the outputs directly by path, e.g. `nix-env -i /nix/store/*groff*/` to get everything you need into your profile. alas, the battle is not over: you also need to change the environment variables `GROFF_FONT_PATH` and `GROFF_TMAC_PATH` to point at the `font` and `tmac` subdirs of `~/.nix-profile/share/groff/$groff_version/`. once this is done, invoking `groff -Tpdf` will work as expected.

unfortunately, the produced daemon binary is rather large, weighing in at around 600K at the time of writing. you can reduce this significantly however by `strip`ping the binary, and reduce it further by compiling without debug functionality turned on (i.e. no debug symbols and no debug log level, both of which insert a large number of strings into the resulting object code).

## configuration

the `parsav` configuration is comprised of two components: the backends list and the config store. the backends list is a simple text file that tells `parsav` which data sources to draw from. the config store is a key-value store which contains the rest of the server's configuration, and is loaded from the backends. the configuration store can be spread across the backends; backends will be checked for configuration keys according to the order in which they are listed. changes to the configuration store affect parsav in real time; you only need to restart the server if you make a change to the backend list.

you can directly modify the store from the command line with the `parsav conf` command; see `parsav conf -h` for more information.

by default, parsav looks for a file called `backend.conf` in the current directory when it is launched. you can override this default with the `parsav_backend_file` environment variable or with the `-b`/`--backend-file` flag. `backend.conf` lists one backend per line, in the form `id type confstring`. for instance, if you had two postgresql databases, you might write a backend file like

    master   pgsql   host=localhost dbname=parsav
	tweets   pgsql   host=420.69.dread.cloud dbname=content

the form the configuration string takes depends on the specific backend. for postgres, it's just the standard postgres connection string, and supports all the usual properties, as it's passed directly to the client library unmodified.

once you've set up a backend and confirmed parsav can connect successfully to it, you can initialize the database with the command `parsav db init <domain>`, where `<domain>` is the domain name you will be hosting `parsav` from. this will install all necessary structures and functions in the target and create all necessary files. it will not, however, create any users. you can create an initial administrative user with the `parsav mkroot <handle>` command, where `<handle>` is the handle you want to use on the server. this will also assign a temporary password for the user if possible. you should now be able to log in and administer the server.

if something goes awry with your administrative account, don't fret! you can get your powers back with the command `parsav user <handle> grant all`, and if you're having difficulties logging in, the command `parsav user <handle> auth pw reset` will give you a fresh password. if all else fails, you can always run `mkroot` again to create a new root account, and try to repair the damage from there.

by default, parsav binds to [::1]:10917. if you want to change this (to run it on a different port, or make it directly accessible to other servers on the network), you can use the command `parsav conf set bind <address>`, where `address` is a binding specification like `0.0.0.0:80`. it is recommended, however, that `parsavd` be kept accessible only from localhost, and that connections be forwarded to it from nginx, haproxy, or a similar reverse proxy. (this can also be changed with the online configuration UI)

................................................................................

* plain text/filesystem storage
* lmdb
* sqlite3
* generic odbc
* lua
* ldap for auth (and maybe actors?)
* cdb (for static content, maybe? does this make sense?)
* mariadb/mysql
* the various nosql horrors, e.g. redis, mongo, and so on

parsav urgently needs an internationalization framework as well. right now everything is just hardcoded in english. yuck.

parsav could be significantly improved by adjusting its memory management strategy. instead of allocating everything with lib.mem.heapa (which currently maps to malloc on all platforms), we should allocate a static buffer for the server overlord object which can simply be cleared and re-used for each http request, and enlarged with `realloc` when necessary. the entire region could be `mlock`ed for better performance, and it would no longer be necessary to track and free memory, as the entire buffer would simply be discarded after use (similar to PHP's original memory management strategy). this would remove possibly the largest source of latency in the codebase, as `parsav` is regrettably quite heavy on malloc, performing numerous allocations for each page rendered.
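
the `m.pool` type added to `mem.t` in this check-in points in that direction. a sketch of what the per-request pattern might look like on top of it (the `handle_one`/`serve_demo` names are illustrative, not actual parsav code):

    -- illustrative only: one pool per worker, reused for every request it serves
    local terra handle_one(pool: &lib.mem.pool)
        var body = [&int8](pool:alloc_bytes(512)) -- scratch space for one response
        -- ... render the page into pool-backed buffers ...
    end
    local terra serve_demo(nreq: int)
        var pool: lib.mem.pool
        pool:init(64 * 1024)           -- one growable buffer, enlarged on demand
        for i = 0, nreq do
            var top = pool:frame()     -- mark the pool before handling a request
            handle_one(&pool)
            pool:reset(top)            -- discard the whole request's memory at once
        end
        pool:free()
    end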

Modified render/nav.t from [50b4e7c2b2] to [0fd87c81ae].

from [50b4e7c2b2]:
render_nav(co: &lib.srv.convo)
	var t: lib.str.acc t:init(64)
	if co.who ~= nil or co.srv.cfg.pol_sec == lib.srv.secmode.public then
		t:lpush(' <a accesskey="t" href="/">timeline</a>')
	end
	if co.who ~= nil then
		t:lpush(' <a accesskey="c" href="/compose">compose</a> <a accesskey="p" href="/'):push(co.who.xid,0)
		t:lpush('">profile</a> <a accesskey="m" href="/media">media</a> <a accesskey="o" href="/conf">configure</a> <a accesskey="d" href="/doc">docs</a> <a accesskey="g" href="/logout">log out</a> <a class="bell" accesskey="x" href="/notices">notices</a>')
	else
		t:lpush(' <a accesskey="d" href="/doc">docs</a> <a accesskey="g" href="/login">log in</a>')
	end
	return t:finalize()
end
return render_nav

to [0fd87c81ae]:
render_nav(co: &lib.srv.convo)
	var t: lib.str.acc t:init(64)
	if co.who ~= nil or co.srv.cfg.pol_sec == lib.srv.secmode.public then
		t:lpush(' <a accesskey="t" href="/">timeline</a>')
	end
	if co.who ~= nil then
		t:lpush(' <a accesskey="c" href="/compose">compose</a> <a accesskey="p" href="/'):push(co.who.xid,0)
		t:lpush('">profile</a> <a accesskey="m" href="/media">media</a> <a accesskey="o" href="/conf">configure</a> <a accesskey="d" href="/doc">docs</a> <div class="ident">@')
		t:push(co.who.handle,0)
		t:lpush('</div> <a accesskey="g" href="/logout">log out</a> <a class="bell" accesskey="x" href="/notices">notices</a>')
	else
		t:lpush(' <a accesskey="d" href="/doc">docs</a> <a accesskey="g" href="/login">log in</a>')
	end
	return t:finalize()
end
return render_nav

Modified route.t from [a8702e1420] to [2b1fe64d41].

from [a8702e1420]:
			end)()]
			privs:dump()
			if privs:sz() > 0 then
				lib.dbg('installing credential restrictions')
				lib.io.fmt('on priv %llu\n',aid)
				co.srv:auth_privs_set(aid, privs)
			end
		end
		co:reroute('?')
		return
	end
	co:complain(400,'bad request','the operation you have requested is not meaningful in this context')
end

to [2b1fe64d41]:
			end)()]
			privs:dump()
			if privs:sz() > 0 then
				lib.dbg('installing credential restrictions')
				lib.io.fmt('on priv %llu\n',aid)
				co.srv:auth_privs_set(aid, privs)
			end

			lib.dbg('setting netmask restrictions')
			var nm = co:pgetv('netmask')
		end
		co:reroute('?')
		return
	end
	co:complain(400,'bad request','the operation you have requested is not meaningful in this context')
end

Modified static/style.scss from [f9852f8724] to [ba9256aed1].

from [f9852f8724]:
		}
		nav {
			all: unset;
			display: flex;
			justify-content: flex-end;
			align-items: center;
			grid-column: 2/3; grid-row: 1/2;
			> a[href] {
				display: block;
				padding: 0.25in 0.10in;
				//padding: calc((25% - 1em)/2) 0.15in;
				&, &::after { transition: 0.3s; }
				text-shadow: 1px 1px 1px black;
				&:hover{
................................................................................
	margin: unset;
	grid-template-columns: 1in 1fr max-content max-content;
	grid-template-rows: min-content max-content;
	margin-bottom: 0.1in;
	transition: 0.2s ease-out;
	>.avatar {
		grid-column: 1/2; grid-row: 1/2;
		img { display: block; width: 1in; height: 1in; margin:0; }
		background: linear-gradient(to bottom, tone(-53%), tone(-57%));
	}
	>a[href].username {
		display: block;
		grid-column: 1/3;
		grid-row: 2/3;
		text-align: left;
		text-decoration: none;

to [ba9256aed1]:
		}
		nav {
			all: unset;
			display: flex;
			justify-content: flex-end;
			align-items: center;
			grid-column: 2/3; grid-row: 1/2;
			.ident {
				color: tone(-20%);
				margin-left: 0.2em;
				border-left: 1px solid tone(-40%);
				padding-left: 0.5em;
			}
			> a[href] {
				display: block;
				padding: 0.25in 0.10in;
				//padding: calc((25% - 1em)/2) 0.15in;
				&, &::after { transition: 0.3s; }
				text-shadow: 1px 1px 1px black;
				&:hover{
................................................................................
	margin: unset;
	grid-template-columns: 1in 1fr max-content max-content;
	grid-template-rows: min-content max-content;
	margin-bottom: 0.1in;
	transition: 0.2s ease-out;
	>.avatar {
		grid-column: 1/2; grid-row: 1/2;
		background: linear-gradient(to bottom, tone(-53%), tone(-57%));
		img {
			display: block; width: 1in; height: 1in; margin:0;
			border-right: 1px solid tone(-65%);
		}
	}
	>a[href].username {
		display: block;
		grid-column: 1/3;
		grid-row: 2/3;
		text-align: left;
		text-decoration: none;

Modified store.t from [0ebd27b207] to [adf1545306].

from [0ebd27b207]:

	tx_enter: &m.source -> bool
	tx_complete: &m.source -> bool
	-- these two functions are special, in that they should be called
	-- directly on a specific backend, rather than passed down to the
	-- backends by the server; that is pathological behavior that will
	-- not have the desired effect

	conf_get: {&m.source, rawstring} -> lib.mem.ptr(int8)
	conf_set: {&m.source, rawstring, rawstring} -> {}
	conf_reset: {&m.source, rawstring} -> {}

	actor_create: {&m.source, &m.actor} -> uint64
	actor_save: {&m.source, &m.actor} -> {}

to [adf1545306]:

	tx_enter: &m.source -> bool
	tx_complete: &m.source -> bool
	-- these two functions are special, in that they should be called
	-- directly on a specific backend, rather than passed down to the
	-- backends by the server; that is pathological behavior that will
	-- not have the desired effect

	server_setup_self: {&m.source, rawstring, lib.mem.ptr(uint8)} -> {}

	conf_get: {&m.source, rawstring} -> lib.mem.ptr(int8)
	conf_set: {&m.source, rawstring, rawstring} -> {}
	conf_reset: {&m.source, rawstring} -> {}

	actor_create: {&m.source, &m.actor} -> uint64
	actor_save: {&m.source, &m.actor} -> {}