Compare commits

..

No commits in common. "19d6b0df29226af22758c85e4547e0fdf240904f" and "99d4794f3bb3deafbb83732799dd7b9ae631feb9" have entirely different histories.

120 changed files with 19 additions and 478 deletions

View file

@ -1,30 +0,0 @@
Case, Summary, Allocated, Deallocated, Not deallocated, Delta Allocated, Delta Deallocated, Delta Not Deallocated
"Basecase", "Allocation summary allocated 19986 deallocated 245 not deallocated 19741", 19986, 245, 19741
"", "Allocation summary allocated 19986 deallocated 245 not deallocated 19741", 19986, 245, 19741, 0, 0, 0
"nil", "Allocation summary allocated 20019 deallocated 253 not deallocated 19766", 20019, 253, 19766, 33, 8, 25
"()", "Allocation summary allocated 19990 deallocated 249 not deallocated 19741", 19990, 249, 19741, 4, 4, 0
"(quote ())", "Allocation summary allocated 20025 deallocated 247 not deallocated 19778", 20025, 247, 19778, 39, 2, 37
"(list)", "Allocation summary allocated 20023 deallocated 257 not deallocated 19766", 20023, 257, 19766, 37, 12, 25
"(list )", "Allocation summary allocated 20023 deallocated 257 not deallocated 19766", 20023, 257, 19766, 37, 12, 25
"(list 1)", "Allocation summary allocated 20033 deallocated 262 not deallocated 19771", 20033, 262, 19771, 47, 17, 30
"(list 1 1)", "Allocation summary allocated 20043 deallocated 267 not deallocated 19776", 20043, 267, 19776, 57, 22, 35
"(list 1 1 1)", "Allocation summary allocated 20053 deallocated 272 not deallocated 19781", 20053, 272, 19781, 67, 27, 40
"(list 1 2 3)", "Allocation summary allocated 20053 deallocated 272 not deallocated 19781", 20053, 272, 19781, 67, 27, 40
"(+)", "Allocation summary allocated 20022 deallocated 255 not deallocated 19767", 20022, 255, 19767, 36, 10, 26
"(+ 1)", "Allocation summary allocated 20030 deallocated 260 not deallocated 19770", 20030, 260, 19770, 44, 15, 29
"(+ 1 1)", "Allocation summary allocated 20039 deallocated 265 not deallocated 19774", 20039, 265, 19774, 53, 20, 33
"(+ 1 1 1)", "Allocation summary allocated 20048 deallocated 270 not deallocated 19778", 20048, 270, 19778, 62, 25, 37
"(+ 1 2 3)", "Allocation summary allocated 20048 deallocated 270 not deallocated 19778", 20048, 270, 19778, 62, 25, 37
"(list 'a 'a 'a)", "Allocation summary allocated 20137 deallocated 278 not deallocated 19859", 20137, 278, 19859, 151, 33, 118
"(list 'a 'b 'c)", "Allocation summary allocated 20137 deallocated 278 not deallocated 19859", 20137, 278, 19859, 151, 33, 118
"(list :a :b :c)", "Allocation summary allocated 20107 deallocated 260 not deallocated 19847", 20107, 260, 19847, 121, 15, 106
"(list :aa :bb :cc)", "Allocation summary allocated 20185 deallocated 260 not deallocated 19925", 20185, 260, 19925, 199, 15, 184
"(list :aaa :bbb :ccc)", "Allocation summary allocated 20263 deallocated 260 not deallocated 20003", 20263, 260, 20003, 277, 15, 262
"(list :alpha :bravo :charlie)", "Allocation summary allocated 20471 deallocated 260 not deallocated 20211", 20471, 260, 20211, 485, 15, 470
"{}", "Allocation summary allocated 19992 deallocated 251 not deallocated 19741", 19992, 251, 19741, 6, 6, 0
"{:z 0}", "Allocation summary allocated 20029 deallocated 257 not deallocated 19772", 20029, 257, 19772, 43, 12, 31
"{:zero 0}", "Allocation summary allocated 20107 deallocated 257 not deallocated 19850", 20107, 257, 19850, 121, 12, 109
"{:z 0 :o 1}", "Allocation summary allocated 20066 deallocated 261 not deallocated 19805", 20066, 261, 19805, 80, 16, 64
"{:zero 0 :one 1}", "Allocation summary allocated 20196 deallocated 263 not deallocated 19933", 20196, 263, 19933, 210, 18, 192
"{:z 0 :o 1 :t 2}", "Allocation summary allocated 20103 deallocated 265 not deallocated 19838", 20103, 265, 19838, 117, 20, 97
"{:zero 0 :one 1 :two 2 :three 3 :four 4 :five five :six 6 :seven 7 :eight 8 :nine 9}", "Allocation summary allocated 21164 deallocated 306 not deallocated 20858", 21164, 306, 20858, 1178, 61, 1117

View file

@ -1,19 +0,0 @@
#!/home/simon/bin/bb
(require '[clojure.java.io :as io])
(import '[java.lang ProcessBuilder$Redirect])

;; Run the external `grep` over `input` (a string), searching for `pattern`.
;; The child's stdout and stderr are inherited, so matching lines appear
;; directly on this script's own output. Blocks until grep exits; returns nil.
(defn grep [input pattern]
  (let [builder (doto (ProcessBuilder. ["grep" pattern])
                  (.redirectOutput ProcessBuilder$Redirect/INHERIT)
                  (.redirectError ProcessBuilder$Redirect/INHERIT))
        process (.start builder)]
    ;; Feed `input` to grep's stdin, then close it so grep sees EOF.
    (with-open [stdin-writer (io/writer (.getOutputStream process))]
      (binding [*out* stdin-writer]
        (print input)
        (flush)))
    (.waitFor process)
    nil))

;; Smoke test: both lines contain "e", so both should be echoed.
(grep "hello\nbye\n" "e")

View file

@ -1,60 +0,0 @@
#!/bin/bash
# Regression tests for `assoc` over mixed association structures: an assoc
# list that also contains an embedded hash map. Each case pipes a Lisp
# expression into the interpreter (target/psse), keeps only the final output
# line, and compares it with the expected value. `result` accumulates the
# failure count.
# NOTE(review): the diff header says this file is 60 lines but only the first
# tests are visible here; presumably the script ends with `exit $result` —
# confirm in the full file.
result=0
# Case 1: key found in a plain assoc-list pair.
expected='1'
actual=`echo "(assoc 'foo '((foo . 1) (bar . 2) {ban 3 froboz 4 foo 5} (foobar . 6)))" | target/psse | tail -1`
echo -n "$0 $1: assoc list binding... "
if [ "${expected}" = "${actual}" ]
then
echo "OK"
else
echo "Fail: expected '${expected}', got '${actual}'"
# Increment the failure counter (bc used for arithmetic, pre-$((...)) style).
result=`echo "${result} + 1" | bc`
fi
# Case 2: key found inside the embedded hash map.
expected='4'
actual=`echo "(assoc 'froboz '((foo . 1) (bar . 2) {ban 3 froboz 4 foo 5} (foobar . 6)))" | target/psse | tail -1`
echo -n "$0 $1: hashmap binding... "
if [ "${expected}" = "${actual}" ]
then
echo "OK"
else
echo "Fail: expected '${expected}', got '${actual}'"
result=`echo "${result} + 1" | bc`
fi
# Case 3: a key bound to nil in the hash map must return nil — and must
# shadow the later (ban . 7) pair, not fall through to it.
expected='nil'
actual=`echo "(assoc 'ban '((foo . 1) (bar . 2) {ban nil froboz 4 foo 5} (foobar . 6) (ban . 7)))" | target/psse | tail -1`
echo -n "$0 $1: key bound to 'nil' (1)... "
if [ "${expected}" = "${actual}" ]
then
echo "OK"
else
echo "Fail: expected '${expected}', got '${actual}'"
result=`echo "${result} + 1" | bc`
fi
# Case 4: a key bound to nil in an assoc-list pair must also return nil.
expected='nil'
actual=`echo "(assoc 'foo '((foo . nil) (bar . 2) {ban 3 froboz 4 foo 5} (foobar . 6)))" | target/psse | tail -1`
echo -n "$0 $1: key bound to nil (2)... "
if [ "${expected}" = "${actual}" ]
then
echo "OK"
else
echo "Fail: expected '${expected}', got '${actual}'"
result=`echo "${result} + 1" | bc`
fi

View file

@ -1,31 +0,0 @@
#!/bin/bash
# Regression tests for `mapcar`: mapping both an interpreted lambda and a
# primitive function across a list. Each case pipes an expression into the
# interpreter (target/psse), keeps the final output line, and compares it
# with the expected value.
# NOTE(review): the diff header says 31 lines but fewer are visible here;
# presumably the script ends with `exit $result` — confirm in the full file.
result=0
#####################################################################
# Map an interpreted (lambda) function across a list of integers.
# (The original comment said "Create an empty map using map notation",
# which appears to be a copy-paste leftover from another test file.)
expected='(2 3 4)'
actual=`echo "(mapcar (lambda (n) (+ n 1)) '(1 2 3))" | target/psse | tail -1`
echo -n "$0: Mapping interpreted function across list: "
if [ "${expected}" = "${actual}" ]
then
echo "OK"
else
echo "Fail: expected '${expected}', got '${actual}'"
result=1
fi
#####################################################################
# Map the primitive `type` function across values of four different types:
# integer, real, ratio, keyword.
# (Original comment "Create an empty map using make-map" was likewise a
# copy-paste leftover.)
expected='("INTR" "REAL" "RTIO" "KEYW")'
actual=`echo "(mapcar type '(1 1.0 1/2 :one))" | target/psse | tail -1`
echo -n "$0: Mapping primitive function across list: "
if [ "${expected}" = "${actual}" ]
then
echo "OK"
else
echo "Fail: expected '${expected}', got '${actual}'"
result=1
fi

View file

@ -1,89 +0,0 @@
# Design decisions for 0.1.0
This is a document that is likely to be revisited, probably frequently.
## Retire the 0.0.X codebase
Move the existing codebase out of the compile space altogether; it is to be
treated as a finished rapid prototype, not extended further, and code largely
not copied but learned from.
## Remain open to new substrate languages, but continue in C for now
I'm disappointed with [Zig](https://ziglang.org/). While the language
concepts are beautiful, and if it were stable it would be an excellent tool, it
isn't stable. I'm still open to build some of the 0.1.X prototype in Zig, but
it isn't the main tool.
I haven't yet evaluated [Nim](https://nim-lang.org/). I'm prejudiced against
its syntax, but, again, I'm open to using it for some of this prototype.
But for now, I will continue to work in C.
## Substrate is shallow
In the 0.0.X prototype, I tried to do too much in the substrate. I tried to
write bignums in C, and in this I failed; I would have done much better to
get a very small Lisp working well sooner, and build new features in that.
In 0.1.X the substrate will be much less feature rich, but support the creation
of novel types of data object in Lisp.
## Paged Space Objects
Paged space objects will be implemented largely in line with [this document](Paged-space-objects.md).
## Tags
Tags will continue to be 32 bit objects, which can be considered as unsigned
integer values or as four bytes. However, only the first three bytes will be
mnemonic. The fourth byte will indicate the size class of the object; where
the size class represents the allocation size, *not* the payload size. The
encoding is as in this table:
| Tag | | | Size of payload | |
| ---- | ----------- | --- | --------------- | --------------- |
| Bits | Field value | Hex | Number of words | Number of bytes |
| ---- | ----------- | --- | --------------- | --------------- |
| 0000 | 0 | 0 | 1 | 8 |
| 0001 | 1 | 1 | 2 | 16 |
| 0010 | 2 | 2 | 4 | 32 |
| 0011 | 3 | 3 | 8 | 64 |
| 0100 | 4 | 4 | 16 | 128 |
| 0101 | 5 | 5 | 32 | 256 |
| 0110 | 6 | 6 | 64 | 512 |
| 0111 | 7 | 7 | 128 | 1024 |
| 1000 | 8 | 8 | 256 | 2048 |
| 1001 | 9 | 9 | 512 | 4096 |
| 1010 | 10 | A | 1024 | 8192 |
| 1011 | 11 | B | 2048 | 16384 |
| 1100 | 12 | C | 4096 | 32768 |
| 1101 | 13 | D | 8192 | 65536 |
| 1110 | 14 | E | 16384 | 131072 |
| 1111 | 15 | F | 32768 | 262144 |
Consequently, an object of size class F will have an allocation size of 32,768 words, but a payload size of 32,766 words. This obviously means that size classes 0 and 1 will not exist, since they would not have any payload.
## Page size
Every page will be 1,048,576 bytes.
## Namespaces
Namespaces will be implemented; in addition to the root namespace, there will be at least the following namespaces:
### :bootstrap
Functions written in the substrate language, intended to be replaced for all normal purposes by functions written in Lisp which may call these bootstrap functions. Not ever available to user code.
### :substrate
Functions written in the substrate language which *may* be available to user-written code.
### :system
Functions, written either in Lisp or in the substrate language, which modify system memory in ways that only trusted and privileged users are permitted to do.
## Access control
Obviously, for this to work, access control lists must be implemented and must work.

View file

@ -1,71 +0,0 @@
# Don't know, don't care
![The famous XKCD cartoon showing all modern digital infrastructure depending on a single person's spare-time project](https://imgs.xkcd.com/comics/dependency.png)
One of the key design principles of the Post Scarcity computing project since my 2006 essay, [Post Scarcity Software](Post-scarcity-software.md), has been "don't know, don't care."
The reason for this is simple. Modern computing systems are extremely complex. It is impossible for someone to be expert on every component of the system. To produce excellent work, it is necessary to specialise, to avoid being distracted by the necessary intricacies of the things on which your work depends, or of the (not yet conceived) intricacies of the work of other people which will ultimately depend on yours. It is necessary to trust.
Randall Munroe's graphic, which I've used to illustrate this essay, looks like a joke, but it isn't.
[Daniel Stenberg](https://en.wikipedia.org/wiki/Daniel_Stenberg) lives not in Nebraska, but in Sweden. He wrote what became [libcurl](https://curl.se/) in 1996, not 2003. He is still its primary maintainer. It pretty much is true to say that all modern digital infrastructure depends on it. It is a basic component which fetches data over a broad range of internet protocols, negotiating the appropriate security. There *are* alternatives to libcurl in (some) other software environments, but it is extremely widely used. Because it deals with security, it is critical; any vulnerability in it needs to be fixed quickly, because it has very major impact.
The current [post-scarcity software environment](https://git.journeyman.cc/simon/post-scarcity) depends on libcurl, because of course it does. You certainly use libcurl yourself, even if you don't know it. You probably used it to fetch this document, in order to read it.
I don't need to know the intricacies of URL schemae, or of Internet protocols, or of security, to the level of detail Daniel does. I've never even reviewed his code. I trust him to know what he's doing.
Daniel's not alone, of course. Linus Torvalds wrote Linux in a university dorm room in Finland; now it powers the vast majority of servers on the Internet, and the vast majority of mobile phones in the world, and, quite incidentally, a cheap Chinese camera drone I bought to film bike rides. Linux is now an enormous project with thousands of contributors, but Linus is still the person who holds it together. [Rasmus Lerdorf](https://en.wikipedia.org/wiki/Rasmus_Lerdorf), from Greenland, wrote PHP to run his personal home page (the clue is in the name); Mark Zuckerberg used PHP to write Facebook; Michel Valdrighi used PHP to write something called b/cafelog, which Matt Mullenweg further developed into WordPress.
There are thousands of others, of course; and, at the layer of hardware, on which all software depends, there are thousands of others whose names I do not even know. I'm vaguely aware of the architects of the ARM chip, but I had to look them up just now because I couldn't remember their names. I know that the ARM is at least a spiritual descendant of the 6502, but I don't know who designed that or anything of their story; and the antecedents behind that I don't know at all. The people behind all the many other chips which make up a working computer? I know nothing about them.
(In any case, if one seriously wanted to build this thing, it would be better to have custom hardware — one would probably have to have custom hardware at least for the router — and if one were to have custom hardware it would be nice if it ran something very close to Lisp right down on the silicon, as the [Symbolics Ivory](https://gwern.net/doc/cs/hardware/1987-baker.pdf) chips did; so you probably wouldn't use ARM cores at all.)
I have met and personally spoken with most of the people behind the Internet protocol stack, but I don't need to have done so in order to use it; and, indeed, the reason that [Jon Postel](https://en.wikipedia.org/wiki/Jon_Postel) bought me a beer was so that he could sit me down and very gently explain how badly I'd misunderstood something.
-----
But this is the point. We don't need to know, or have known, these people to build on their work. We don't have to, and cannot in detail, fully understand their work. There is simply too much of it, its complexity would overwhelm us.
We don't know. We don't care. And that is a protective mechanism, a mechanism which is necessary in order to allow us to focus on our own task, if we are to produce excellent work. If we are to create a meaningful contribution on which the creators of the future can build.
-----
But there is a paradox, here, one of many conceptual paradoxes that I have encountered working on the Post Scarcity project.
I am essentially a philosopher, or possibly a dilettante, rather than an engineer. When [Danny Hillis](https://longnow.org/people/board/danny0/) came up with the conception of the [Connection Machine](https://en.wikipedia.org/wiki/Connection_Machine), a machine which is consciously one of the precursors of the post-scarcity project, he sought expert collaborators — and was so successful in doing so that [he persuaded Richard Feynman to join the project](https://longnow.org/ideas/richard-feynman-and-the-connection-machine/). I haven't recruited any collaborators. I don't have the social skills. And I don't have sufficient confidence that my idea is even good in itself.
In building the first software prototype, I realised that I don't even properly understand what it means to [intern](http://www.ai.mit.edu/projects/iiip/doc/CommonLISP/HyperSpec/Body/fun_intern.html) something. I realised that I still don't understand how in many Common Lisp implementations, for any integer number `n`, `(eq n n)` can return true. I note that in practice it *does*, but I don't understand how it's done.
In the current post scarcity prototype, it *is* true for very small values of `n`, because I cache an array of small positive integers as an optimisation hack to prevent memory churn, but that's very special case and I cannot believe that Common Lisp implementations are doing it for significantly larger numbers of integers. I note that in SBCL, two bignums of equal value are not `eq`, so presumably SBCL is doing some sort of hack similar to mine, but I do not know how it works and I *shouldn't* care.
Platonically, two instances of the same number *should be* the same object; but we do not live in a Platonic world and I don't want to. I'm perfectly happy that `eq` (which should perhaps be renamed `identical?`) should not work for numbers.
What the behaviour is of the functions that we use, at whatever layer in the stack we work, does matter. We do need to know that. But what happens under the surface in order to deliver that behaviour? We don't need to know. We don't need to care. And we shouldn't, because that way leads to runaway recursion: behind every component, there is another component, which makes other compromises with physical matter which make good engineering sense to the people who understand that component well enough to design and to maintain it.
The stack is not of infinite depth, of course. At its base is silicon, and traces of metals on silicon, and the behaviour of electrons as they interact with individual atoms in those traces. That is knowable, in principle, by someone. But there are sufficiently many layers in the stack, and sufficient complexity in each layer, that to have a good, clear, understanding of every layer is beyond the mental capacity of anyone I know, and, I believe, is generally beyond the mental capacity of any single person.
-----
But this is the point. The point is I do need to know, and do need to care, if I am to complete this project on my own; and I don't have sufficient faith in the utility of the project (or my ability to communicate that utility) that I believe that anyone else will ever care enough to contribute to it.
And I don't have the skills, or the energy, or, indeed, the remaining time, to build any of it excellently. If it is to be built, I need collaborators; but I don't have the social skills to attract collaborators, or probably to work with them; and, actually, if I did have expert collaborators there would probably be no place for me in the project, because I don't have excellence at anything.
-----
I realise that I don't even really understand what a hypercube is. I describe my architecture as a hypercube. It is a cube because it has three axes, even though each of those axes is conceptually circular. Because the axes are circular, the thing can only be approximated in three dimensional space by using links of flexible wire or glass fibres to join things which, in three dimensional topology, cannot otherwise be joined; it is therefore slightly more than three dimensional while being considerably less than four dimensional.
I *think* this is also Hillis' understanding of a hypercube, but I could be wrong on that.
Of course, my architecture could be generalised to have four, or five, or six, or more circular axes
[^1]: Could it? I'm reasonably confident that it could have *six* circular axes, but I cannot picture in my head how the grid intersections of a four-and-a-bit dimensional grid would work.
, and this would result in each node having more immediate neighbours, which would potentially speed up computation by shortening hop paths. But I cannot help feeling that with each additional axis there comes a very substantial increase in the complexity of physically routing the wires, so three-and-a-bit dimensions may be as good as you practically get.
I don't have the mathematical skill to mentally model how a computation would scale through this structure. It's more an 'if I build it I will find out whether this is computationally efficient' than an 'I have a principled idea of why this should be computationally efficient.' Intuitively, it *should be* more efficient than a [von Neumann architecture](https://en.wikipedia.org/wiki/Von_Neumann_architecture), and it's easy to give an account of how it can address (much) more memory than obvious developments of our current architectures. But I don't have a good feel of the actual time cost of copying data hoppity-hop across the structure, or the heuristics of when it will be beneficial to shard a computation between neighbours.
-----
Which brings me back to why I'm doing this. I'm doing it, principally, to quiet the noises in my brain; as an exercise in preventing my propensity for psychiatric melt-down from overwhelming me. It isn't, essentially, well-directed engineering. It is, essentially, self-prescribed therapy. There is no reason why anyone else should be interested.
Which is, actually, rather solipsistic. Not a thought I like!

View file

@ -1,69 +0,0 @@
# Paged space objects
*Antecedents for this essay:
1. [Reference counting, and the garbage collection of equal sized objects](https://www.journeyman.cc/blog/posts-output/2013-08-25-reference-counting-and-the-garbage-collection-of-equal-sized-objects/);
2. [Vector space, Pages, Mark-but-don't-sweep, and the world's slowest ever rapid prototype](https://www.journeyman.cc/blog/posts-output/2026-03-13-The-worlds-slowest-ever-rapid-prototype/).*
The post-scarcity software environment needs to store data in objects. Much of the data will be in objects which will fit in the memory footprint of a cons cell, but some won't, and those that won't will be in a variety of sizes.
Conventionally, operating systems allocate memory as a heap. If you allocate objects of differing sizes from a heap, the heap becomes fragmented, like a [Sierpiński carpet](https://en.wikipedia.org/wiki/Sierpi%C5%84ski_carpet) or [Cantor dust](https://en.wikipedia.org/wiki/Cantor_set#Cantor_dust) — there are lots of holes in it, but it becomes increasingly difficult to find a hole which will fit anything large.
If we store our objects in containers of standardised sizes, then, for each of those standardised sizes, we can maintain a freelist of currently unused containers, from which new containers can be allocated. But we still don't want those relatively small objects floating around independently in memory, because we'll still get the fragmentation problem.
This was the initial motivation behind [cons pages](https://www.journeyman.cc/post-scarcity/html/conspage_8h.html#structcons__page). However, quite early in the development of the prototype, it became obvious that we were allocating and deallocating very many stack frames, and many hash tables, neither of which fit in the memory footprint of a cons cell; and that, going forward, it was likely that we would generate many other sorts of larger objects.
My first thought was to generalise the cons page idea, and generate pages of equal sized objects; that is, one set of pages for objects (like cons cells) with a two word payload, one for objects with a four word payload, one for objects with an eight word payload, and so on. The key idea was that each of these pages would be of equal size, so that if, say, we needed to allocate more eight word objects and there was a page for two word objects currently empty, the memory footprint could be reassigned: the hole in the carpet would be the right size.
If we have to allocate an object which needs a five word payload, it will have to be allocated as an eight word object in an eight word object page, which wastes some memory, for the lifetime of that object; but that memory can be efficiently recovered at the end of life, and the heap doesn't fragment. Any page will, at any time, be partly empty, which wastes more memory, but again, that memory can later be efficiently reused.
The potential problem is that you might end up, say, with many pages for two word objects each of which were partly empty, and have nowhere to allocate new eight word objects; and if this does prove in practice to be a problem, then a mark and sweep garbage collector — something I *really* don't want — will be needed. But that is not a problem for just now.
## Efficiently allocating pages
I cannot see how we can efficiently manage pages without each page having some housekeeping data, as every other data object in the system must have a header for housekeeping data. It may be that I am just stuck in my thinking and that the header for pages is not needed, but I *think* it is, and I am going to proceed for now as though it were.
The problem here is that, on an essentially binary machine, it makes sense to allocate things in powers of two; and, as that makes sense at the level of allocating objects in pages, so it makes sense at the level of the basic heap allocator. I'm proposing to allocate objects in standardised containers of these payload sizes:
| Tag | | | Size of payload | |
| ---- | ----------- | --- | --------------- | --------------- |
| Bits | Field value | Hex | Number of words | Number of bytes |
| ---- | ----------- | --- | --------------- | --------------- |
| 0000 | 0 | 0 | 1 | 8 |
| 0001 | 1 | 1 | 2 | 16 |
| 0010 | 2 | 2 | 4 | 32 |
| 0011 | 3 | 3 | 8 | 64 |
| 0100 | 4 | 4 | 16 | 128 |
| 0101 | 5 | 5 | 32 | 256 |
| 0110 | 6 | 6 | 64 | 512 |
| 0111 | 7 | 7 | 128 | 1024 |
| 1000 | 8 | 8 | 256 | 2048 |
| 1001 | 9 | 9 | 512 | 4096 |
| 1010 | 10 | A | 1024 | 8192 |
| 1011 | 11 | B | 2048 | 16384 |
| 1100 | 12 | C | 4096 | 32768 |
| 1101 | 13 | D | 8192 | 65536 |
| 1110 | 14 | E | 16384 | 131072 |
| 1111 | 15 | F | 32768 | 262144 |
This scheme allows me to store the allocation payload size of an object, and consequently the type of a page intended to store objects of that size, in four bits, which is pretty economical. But it's not nothing, and there's a cost to this. The irreducible minimum size of header that objects in the system need to have — in my current design — is two words. So the allocation size of an object with a payload of two words, is four words; but the allocation size of an object with a payload size of thirty two thousand, seven hundred and sixty eight words, is thirty two thousand, seven hundred and seventy words.
Why does that matter?
Well, suppose we allocate pages of a megabyte, and we take out of that megabyte a two word page header. Then we can fit 262,143 objects with a payload size of two into that page, and waste only two words. But we can fit only three objects of size 262,144 into such a page, and we waste 262,138 words, which feels bad.
When I first realised this, I thought, well, the idea was nice, but it doesn't work. There are three potential solutions, each of which feel inelegant to me:
1. We simply ignore the wasted space;
2. Given that the overwhelming majority of objects used by the system, especially of transient objects, will be of payload size two (allocation size four), we fill all 'spare' space in pages with objects of payload size two, and push them all onto the freelist of objects of payload size two;
(this feels ugly to me because it breaks the idea that all objects on a given page should be of the same size)
3. We treat the size signature of the page — that four bit value — as being related not to the payload size of the objects to be allocated into the page, but to the allocation size; so that cons cells, with a payload size of two and thus an allocation size of four, would be allocated into pages with a size tag of 0001 and not a size tag of 0010; and we store the housekeeping data for the page itself (waves hands vaguely) somewhere else;
(this feels ugly to me because, for me, the size of an object is its payload size, and I'm deeply bothered by things floating about randomly in memory without identifying information).
There's a wee bit of autistic insistence on order in my design choices there, that I should not get hung up on. Some objects really do need allocation sizes in memory which are powers of two, but most in fact don't. Currently, the only objects which I commonly allocate and deallocate which are not cons-space objects — not objects with a payload size of two — are stack frames (current payload size 12) and hash tables (current payload size variable, but defaults to 34).
If we're storing the (encoded) allocation size of each object in the tag of the object — which I think that in the 0.1.0 prototype we will, and if every object on any given page is of the same size, which seems to me a good plan, then I'm not sure that we actually need to store any other housekeeping data on the page, because the header of every object is the same size, and the header of every object in the page holds the critical bit of housekeeping information about the page, so we can always get that value from the header of the first object in the page.
If we take these two pragmatic compromises together — that the size encoded in the tag of an object is its allocation size not its payload size, and that the allocation size in the first object on a page is the allocation size for that page — then every page can fit an exact number of objects with no space wasted.
That's not beautiful but I think it's sensible.

View file

@ -1,23 +1,17 @@
# Roadmap
With the release of 0.0.6 close, it's time to look at a plan for the future
development of the project.
With the release of 0.0.6 close, it's time to look at a plan for the future development of the project.
I have an almost-working Lisp interpreter, which, as an interpreter, has many
of the features of the language I want. It runs in one thread on one processor.
I have an almost-working Lisp interpreter, which, as an interpreter, has many of the features of the language I want. It runs in one thread on one processor.
Given how experimental this all is, I don't think I need it to be a polished
interpreter, and polished it isn't. Lots of things are broken.
Given how experimental this all is, I don't think I need it to be a polished interpreter, and polished it isn't. Lots of things are broken.
* garbage collection is pretty broken, and I'm beginning to doubt my whole
garbage collection strategy;
* garbage collection is pretty broken, and I'm beginning to doubt my whole garbage collection strategy;
* bignums are horribly broken;
* there's something very broken in shallow-bound symbols, and that matters
and will have to be fixed;
* there's something very broken in shallow-bound symbols, and that matters and will have to be fixed;
* there are undoubtedly many other bugs I don't know about.
However, while I will fix bugs where I can, it's good enough for other people
to play with if they're mad enough, and it's time to move on.
However, while I will fix bugs where I can, it's good enough for other people to play with if they're mad enough, and it's time to move on.
## Next major milestones
@ -56,77 +50,44 @@ So release 0.1.0, which I'll target for 1<sup>st</sup> January 2027, will
essentially be a Lisp interpreter running on the new substrate and memory
architecture, without any significant new features.
See [0.1.0 design decisions](0-1-0-design-decisions.md) for more detail.
### Simulated hypercube
There is really no point to this whole project while it remains a single thread
running on a single processor. Until I can pass off computation to peer
neighbours, I can't begin to understand what the right strategies are for when
to do so.
There is really no point to this whole project while it remains a single thread running on a single processor. Until I can pass off computation to peer neighbours, I can't begin to understand what the right strategies are for when to do so.
`cond` is explicitly sequential, since later clauses should not be executed at
all if earlier ones succeed. `progn` is sort of implicitly sequential, since
it's the value of the last form in the sequence which will be returned.
`cond` is explicitly sequential, since later clauses should not be executed at all if earlier ones succeed. `progn` is sort of implicitly sequential, since it's the value of the last form in the sequence which will be returned.
For `mapcar`, the right strategy might be to partition the list argument
between each of the idle neighbours, and then reassemble the results that come
back.
For `mapcar`, the right strategy might be to partition the list argument between each of the idle neighbours, and then reassemble the results that come back.
For most other things, my hunch is that you pass args which are not
self-evaluating to idle neighbours, keeping (at least) one on the originating
node to work on while they're busy.
For most other things, my hunch is that you pass args which are not self-evaluating to idle neighbours, keeping (at least) one on the originating node to work on while they're busy.
But before that can happen, we need a router on each node which can monitor
concurrent traffic on six bidirectional links. I think at least initially what
gets written across those links is just S-expressions.
But before that can happen, we need a router on each node which can monitor concurrent traffic on six bidirectional links. I think at least initially what gets written across those links is just S-expressions.
I think a working simulated hypercube is the key milestone for version 0.2.0.
I think a working simulated hypercube is the key milestone for version 0.1.1.
### Sysout, sysin, and system persistence
Doctrine is that the post scarcity computing environment doesn't have a file
system, but nevertheless we need some way of making an image of a working
system so that, after a catastrophic crash or a power outage, it can be brought
back up to a known good state. This really needs to be in 0.1.1.
Doctrine is that the post scarcity computing environment doesn't have a file system, but nevertheless we need some way of making an image of a working system so that, after a catastrophic crash or a power outage, it can be brought back up to a known good state. This also really needs to be in 0.1.1.
### Better command line experience
The current command line experience is embarrassingly poor. Recallable input
history, input line editing, and a proper structure editor are all things that
I will need for my comfort.
The current command line experience is embarrassingly poor. Recallable input history, input line editing, and a proper structure editor are all things that I will need for my comfort.
### Users, groups and ACLs
Allowing multiple users to work together within the same post scarcity
computing environment while retaining security and privacy is a major goal. So
working out ways for users to sign on and be authenticated, and to configure
their own environment, and to set up their own access control lists on objects
they create, needs to be another nearish term goal. Probably 0.1.2.
Allowing multiple users to work together within the same post scarcity computing environment while retaining security and privacy is a major goal. So working out ways for users to sign on and be authenticated, and to configure their own environment, and to set up their own access control lists on objects they create, needs to be another nearish term goal. Probably 0.1.2.
### Homogeneities, regularities, slots, migration, permeability
There are a lot of good ideas about the categorisation and organisation of data
which are sketched in my original
[Post scarcity software](Post-scarcity-software.md) essay which I've never
really developed further because I didn't have the right software environment
for them, which now I shall have. It would be good to build them.
There are a lot of good ideas about the categorisation and organisation of data which are sketched in my original [Post scarcity software](Post-scarcity-software.md) essay which I've never really developed further because I didn't have the right software environment for them, which now I shall have. It would be good to build them.
### Compiler
I do want this system to have a compiler. I do want compiled functions to be
the default. And I do want to understand how to write my own compiler for a
system like this. But until I know what the processor architecture of the
system I'm targeting is, worrying too much about a compiler seems premature.
I do want this system to have a compiler. I do want compiled functions to be the default. And I do want to understand how to write my own compiler for a system like this. But until I know what the processor architecture of the system I'm targeting is, worrying too much about a compiler seems premature.
### Graphical User Interface
Ultimately I want a graphical user interface at least as fluid and flexible as
what we had on Interlisp machines 40 years ago. It's not a near term goal yet.
Ultimately I want a graphical user interface at least as fluid and flexible as what we had on Interlisp machines 40 years ago. It's not a near term goal yet.
### Real hardware
This machine would be **very** expensive to build, and there's no way I'm ever
going to afford more than a sixty-four node machine. But it would be nice to
have software which would run effectively on a four billion node machine, if
one could ever be built. I think that has to be the target for version 1.0.0.
This machine would be **very** expensive to build, and there's no way I'm ever going to afford more than a sixty-four node machine. But it would be nice to have software which would run effectively on a four billion node machine, if one could ever be built. I think that has to be the target for version 1.0.0.

View file

@ -1,29 +1,5 @@
# State of Play
## 20260323
I started an investigation of the [Zig language](https://ziglang.org/) and
come away frustrated. It's definitely an interesting language, and *I think*
one capable of doing what I want. But in trying to learn, I checked out
someone else's [Lisp interpreter in Zig](https://github.com/cryptocode/bio).
The last commit to this project is six months ago, so fairly current; project
documentation is polished, implying the project is well advanced and by someone
competent.
It won't build.
It won't build because there are breaking changes to the build system in the
current version of Zig, and, according to helpful people on the Zig language
Discord, breaking changes in Zig versions are quite frequent.
Post-scarcity is a project which proceeds slowly, and is very large indeed. I
will certainly not complete it before I die.
I don't feel unstable tools are a good choice.
I have, however, done more thinking about [Paged space objects], and think I
now have a buildable specification.
## 20260319
Right, the `member?` bug [is fixed](https://git.journeyman.cc/simon/post-scarcity/issues/11).

View file

@ -1,9 +0,0 @@
/// A Page is an area of memory in which objects are stored. Every page has
/// a header, and every page header has common structure. The objects stored
/// on any page are all PagedObjects, q.v. and, on any given page, all the
/// objects stored on that page are of the same size.
const Page = struct {
    /// Raw storage for the page: one megabyte. Declared as a field
    /// (`name: type,`) rather than a `const` declaration — a `const` inside
    /// a union is a namespaced constant (here, a constant of type `type`),
    /// which would leave the union with zero fields and fail to compile.
    /// The single-member union is kept as written, presumably as a
    /// placeholder for future alternative views of the page content —
    /// TODO confirm.
    content: union {
        bytes: [1048576]u8,
    },
};

View file

@ -1,17 +0,0 @@
/// Header for objects which are allocated in pages.
const PagedSpaceObjectHeader = struct {
    /// Object tag, readable either as four raw bytes or as one 32-bit
    /// value. Declared as a field with real union members — the original
    /// `const` declarations were namespaced constants of type `type`,
    /// not fields, so no instance would actually carry a tag.
    tag: union {
        bytes: [4]u8,
        value: u32,
    },
    /// 32-bit count (`var count = u32;` is invalid Zig — it tries to
    /// assign the type `u32` as a runtime value). Presumably a reference
    /// count — TODO confirm intended semantics.
    count: u32,
    acl: u64, // later when we have a pointer object defined this will be substituted
};
/// A four-word paged-space object: common header plus a small payload.
const PSO4 = struct {
    /// Common header shared by all paged-space objects. The original
    /// `const PagedSpaceObjectHeader: header;` had the identifier and
    /// type reversed and no initializer, which does not compile.
    header: PagedSpaceObjectHeader,
    /// Payload, viewable as raw bytes or as 64-bit words.
    /// NOTE(review): the two views disagree in size — [8]u8 is 8 bytes
    /// but [2]u64 is 16 bytes; the union will be 16 bytes. Given the
    /// name PSO4, confirm whether bytes should be [16]u8 (kept as
    /// written here).
    payload: union {
        bytes: [8]u8,
        words: [2]u64,
    },
};

View file

@ -1 +0,0 @@
/// Version identifier for the current build, as a UTF-8 byte slice.
const version: []const u8 = "0.1.0-SNAPSHOT";

View file

Can't render this file because it has a wrong number of fields in line 2.

Some files were not shown because too many files have changed in this diff Show more