From 49ee69dea3d55d7d91004043cf37625b4df4ed69 Mon Sep 17 00:00:00 2001 From: LorisSigrist Date: Fri, 13 Dec 2024 00:13:19 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20LorisSig?= =?UTF-8?q?rist/homepage@b34f8488b2c8dd441be250b325bef46e74973f18=20?= =?UTF-8?q?=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../chunks/{Metadata.DnEAGf0u.js => Metadata.Bw4XScZR.js} | 2 +- .../chunks/{entry.Z8HtnyGu.js => entry.yZjI-Yon.js} | 6 +++--- .../chunks/{stores.CwalIs4n.js => stores.DY7R87KL.js} | 2 +- _app/immutable/entry/{app.CxOKNbHK.js => app.Dm30yPop.js} | 8 ++++---- _app/immutable/entry/start.C11_HzFi.js | 4 ---- _app/immutable/entry/start.ggmG3Jj7.js | 4 ++++ _app/immutable/nodes/{1.k5BtEzhJ.js => 1.B1PS4BAS.js} | 2 +- _app/immutable/nodes/{2.GgLf60-q.js => 2.CM7yvUKv.js} | 2 +- _app/immutable/nodes/{4.D11J9D_p.js => 4.B1e19oMb.js} | 2 +- _app/version.json | 2 +- adding-devtools-to-vite-plugins.html | 4 ++-- blurhash.html | 2 +- building-an-i18n-library-for-the-modern-web.html | 4 ++-- declarative-exception-handling.html | 4 ++-- dts-buddy-fixes-type-declarations.html | 4 ++-- fluent-inputs.html | 4 ++-- i-started-taking-notes.html | 2 +- index.html | 2 +- melt-ui-neat-pattern.html | 4 ++-- mock-data-with-zocker.html | 4 ++-- reliably-avoiding-theme-flashes.html | 4 ++-- tailwind-ui.html | 4 ++-- the-better-way-to-load-data.html | 4 ++-- the-poor-mans-scheduled-post.html | 4 ++-- zod-driven-development.html | 2 +- 25 files changed, 43 insertions(+), 43 deletions(-) rename _app/immutable/chunks/{Metadata.DnEAGf0u.js => Metadata.Bw4XScZR.js} (99%) rename _app/immutable/chunks/{entry.Z8HtnyGu.js => entry.yZjI-Yon.js} (99%) rename _app/immutable/chunks/{stores.CwalIs4n.js => stores.DY7R87KL.js} (90%) rename _app/immutable/entry/{app.CxOKNbHK.js => app.Dm30yPop.js} (99%) delete mode 100644 _app/immutable/entry/start.C11_HzFi.js create mode 100644 _app/immutable/entry/start.ggmG3Jj7.js 
rename _app/immutable/nodes/{1.k5BtEzhJ.js => 1.B1PS4BAS.js} (97%) rename _app/immutable/nodes/{2.GgLf60-q.js => 2.CM7yvUKv.js} (99%) rename _app/immutable/nodes/{4.D11J9D_p.js => 4.B1e19oMb.js} (99%) diff --git a/_app/immutable/chunks/Metadata.DnEAGf0u.js b/_app/immutable/chunks/Metadata.Bw4XScZR.js similarity index 99% rename from _app/immutable/chunks/Metadata.DnEAGf0u.js rename to _app/immutable/chunks/Metadata.Bw4XScZR.js index ca350800..886c6a2e 100644 --- a/_app/immutable/chunks/Metadata.DnEAGf0u.js +++ b/_app/immutable/chunks/Metadata.Bw4XScZR.js @@ -1,6 +1,6 @@ import { s as safe_not_equal, n as noop, c as component_subscribe } from "./scheduler.DwkGEAYb.js"; import { S as SvelteComponent, i as init, e as element, n as empty, q as head_selector, c as claim_element, d as detach, m as attr, h as append_hydration, g as insert_hydration } from "./index.BoYP9eQs.js"; -import { p as page } from "./stores.CwalIs4n.js"; +import { p as page } from "./stores.DY7R87KL.js"; const default_favicon = "" + new URL("../assets/favicon.BtYv9CGA.png", import.meta.url).href; function create_else_block(ctx) { let meta; diff --git a/_app/immutable/chunks/entry.Z8HtnyGu.js b/_app/immutable/chunks/entry.yZjI-Yon.js similarity index 99% rename from _app/immutable/chunks/entry.Z8HtnyGu.js rename to _app/immutable/chunks/entry.yZjI-Yon.js index 60ed56ec..c4db4833 100644 --- a/_app/immutable/chunks/entry.Z8HtnyGu.js +++ b/_app/immutable/chunks/entry.yZjI-Yon.js @@ -310,9 +310,9 @@ function set(key, value, stringify = JSON.stringify) { } catch { } } -const base = ((_a = globalThis.__sveltekit_rwdz7a) == null ? void 0 : _a.base) ?? ""; -const assets = ((_b = globalThis.__sveltekit_rwdz7a) == null ? void 0 : _b.assets) ?? base; -const version = "1733962297567"; +const base = ((_a = globalThis.__sveltekit_1wckiq8) == null ? void 0 : _a.base) ?? ""; +const assets = ((_b = globalThis.__sveltekit_1wckiq8) == null ? void 0 : _b.assets) ?? 
base; +const version = "1734048701097"; const SNAPSHOT_KEY = "sveltekit:snapshot"; const SCROLL_KEY = "sveltekit:scroll"; const STATES_KEY = "sveltekit:states"; diff --git a/_app/immutable/chunks/stores.CwalIs4n.js b/_app/immutable/chunks/stores.DY7R87KL.js similarity index 90% rename from _app/immutable/chunks/stores.CwalIs4n.js rename to _app/immutable/chunks/stores.DY7R87KL.js index 85ca01da..ba242ef2 100644 --- a/_app/immutable/chunks/stores.CwalIs4n.js +++ b/_app/immutable/chunks/stores.DY7R87KL.js @@ -1,4 +1,4 @@ -import { s as stores } from "./entry.Z8HtnyGu.js"; +import { s as stores } from "./entry.yZjI-Yon.js"; const getStores = () => { const stores$1 = stores; return { diff --git a/_app/immutable/entry/app.CxOKNbHK.js b/_app/immutable/entry/app.Dm30yPop.js similarity index 99% rename from _app/immutable/entry/app.CxOKNbHK.js rename to _app/immutable/entry/app.Dm30yPop.js index e8d0f39e..02c98f6a 100644 --- a/_app/immutable/entry/app.CxOKNbHK.js +++ b/_app/immutable/entry/app.Dm30yPop.js @@ -4065,10 +4065,10 @@ class Root extends SvelteComponent { } const nodes = [ () => __vitePreload(() => import("../nodes/0.CMIPQFkk.js"), true ? __vite__mapDeps([0,1,2]) : void 0, import.meta.url), - () => __vitePreload(() => import("../nodes/1.k5BtEzhJ.js"), true ? __vite__mapDeps([3,1,2,4,5,6]) : void 0, import.meta.url), - () => __vitePreload(() => import("../nodes/2.GgLf60-q.js"), true ? __vite__mapDeps([7,1,2,8,4,5,6,9,10,11]) : void 0, import.meta.url), + () => __vitePreload(() => import("../nodes/1.B1PS4BAS.js"), true ? __vite__mapDeps([3,1,2,4,5,6]) : void 0, import.meta.url), + () => __vitePreload(() => import("../nodes/2.CM7yvUKv.js"), true ? __vite__mapDeps([7,1,2,8,4,5,6,9,10,11]) : void 0, import.meta.url), () => __vitePreload(() => import("../nodes/3.BMPqw2Om.js"), true ? __vite__mapDeps([12,1,2,13]) : void 0, import.meta.url), - () => __vitePreload(() => import("../nodes/4.D11J9D_p.js"), true ? 
__vite__mapDeps([14,1,2,8,4,5,6,9]) : void 0, import.meta.url), + () => __vitePreload(() => import("../nodes/4.B1e19oMb.js"), true ? __vite__mapDeps([14,1,2,8,4,5,6,9]) : void 0, import.meta.url), () => __vitePreload(() => import("../nodes/5.BrRoiui3.js"), true ? __vite__mapDeps([15,1,2,10,11,6,16,17]) : void 0, import.meta.url), () => __vitePreload(() => import("../nodes/6.Cm6WBkrv.js"), true ? __vite__mapDeps([18,1,2]) : void 0, import.meta.url), () => __vitePreload(() => import("../nodes/7.7J3Dh_0O.js"), true ? __vite__mapDeps([19,1,2]) : void 0, import.meta.url), @@ -4119,7 +4119,7 @@ export { }; function __vite__mapDeps(indexes) { if (!__vite__mapDeps.viteFileDeps) { - __vite__mapDeps.viteFileDeps = ["../nodes/0.CMIPQFkk.js","../chunks/scheduler.DwkGEAYb.js","../chunks/index.BoYP9eQs.js","../nodes/1.k5BtEzhJ.js","../chunks/stores.CwalIs4n.js","../chunks/entry.Z8HtnyGu.js","../chunks/index.CU0sutvA.js","../nodes/2.GgLf60-q.js","../chunks/Metadata.DnEAGf0u.js","../assets/Metadata.DyfCrczt.css","../chunks/each.CuygwpM4.js","../chunks/theme.COyUI6g7.js","../nodes/3.BMPqw2Om.js","../assets/3.CqjOwtjT.css","../nodes/4.D11J9D_p.js","../nodes/5.BrRoiui3.js","../chunks/_commonjsHelpers.DWwsNxpa.js","../assets/5.BZyQRmUM.css","../nodes/6.Cm6WBkrv.js","../nodes/7.7J3Dh_0O.js","../nodes/8.BjfjDFgb.js","../nodes/9.DcFycQyw.js","../nodes/10.DYw3yv1V.js","../chunks/preload-helper.CP-VSAQc.js","../chunks/Showcase.Dhs04y0u.js","../nodes/11.Bqipk0aN.js","../nodes/12.C8xP_BLy.js","../nodes/13.DHo-KTOd.js","../nodes/14.fTvgoynY.js","../nodes/15.BkzjzWD0.js","../nodes/16.DeDV1Yy2.js","../nodes/17.f7e5KaRI.js","../nodes/18.D07N7BNU.js"] + __vite__mapDeps.viteFileDeps = 
["../nodes/0.CMIPQFkk.js","../chunks/scheduler.DwkGEAYb.js","../chunks/index.BoYP9eQs.js","../nodes/1.B1PS4BAS.js","../chunks/stores.DY7R87KL.js","../chunks/entry.yZjI-Yon.js","../chunks/index.CU0sutvA.js","../nodes/2.CM7yvUKv.js","../chunks/Metadata.Bw4XScZR.js","../assets/Metadata.DyfCrczt.css","../chunks/each.CuygwpM4.js","../chunks/theme.COyUI6g7.js","../nodes/3.BMPqw2Om.js","../assets/3.CqjOwtjT.css","../nodes/4.B1e19oMb.js","../nodes/5.BrRoiui3.js","../chunks/_commonjsHelpers.DWwsNxpa.js","../assets/5.BZyQRmUM.css","../nodes/6.Cm6WBkrv.js","../nodes/7.7J3Dh_0O.js","../nodes/8.BjfjDFgb.js","../nodes/9.DcFycQyw.js","../nodes/10.DYw3yv1V.js","../chunks/preload-helper.CP-VSAQc.js","../chunks/Showcase.Dhs04y0u.js","../nodes/11.Bqipk0aN.js","../nodes/12.C8xP_BLy.js","../nodes/13.DHo-KTOd.js","../nodes/14.fTvgoynY.js","../nodes/15.BkzjzWD0.js","../nodes/16.DeDV1Yy2.js","../nodes/17.f7e5KaRI.js","../nodes/18.D07N7BNU.js"] } return indexes.map((i) => __vite__mapDeps.viteFileDeps[i]) } diff --git a/_app/immutable/entry/start.C11_HzFi.js b/_app/immutable/entry/start.C11_HzFi.js deleted file mode 100644 index c51cf5a7..00000000 --- a/_app/immutable/entry/start.C11_HzFi.js +++ /dev/null @@ -1,4 +0,0 @@ -import { a } from "../chunks/entry.Z8HtnyGu.js"; -export { - a as start -}; diff --git a/_app/immutable/entry/start.ggmG3Jj7.js b/_app/immutable/entry/start.ggmG3Jj7.js new file mode 100644 index 00000000..8bc14d8d --- /dev/null +++ b/_app/immutable/entry/start.ggmG3Jj7.js @@ -0,0 +1,4 @@ +import { a } from "../chunks/entry.yZjI-Yon.js"; +export { + a as start +}; diff --git a/_app/immutable/nodes/1.k5BtEzhJ.js b/_app/immutable/nodes/1.B1PS4BAS.js similarity index 97% rename from _app/immutable/nodes/1.k5BtEzhJ.js rename to _app/immutable/nodes/1.B1PS4BAS.js index bff0eebb..0c9bc0b4 100644 --- a/_app/immutable/nodes/1.k5BtEzhJ.js +++ b/_app/immutable/nodes/1.B1PS4BAS.js @@ -1,6 +1,6 @@ import { s as safe_not_equal, n as noop, c as component_subscribe } from 
"../chunks/scheduler.DwkGEAYb.js"; import { S as SvelteComponent, i as init, e as element, t as text, s as space, c as claim_element, a as children, b as claim_text, d as detach, f as claim_space, g as insert_hydration, h as append_hydration, j as set_data } from "../chunks/index.BoYP9eQs.js"; -import { p as page } from "../chunks/stores.CwalIs4n.js"; +import { p as page } from "../chunks/stores.DY7R87KL.js"; function create_fragment(ctx) { var _a; let h1; diff --git a/_app/immutable/nodes/2.GgLf60-q.js b/_app/immutable/nodes/2.CM7yvUKv.js similarity index 99% rename from _app/immutable/nodes/2.GgLf60-q.js rename to _app/immutable/nodes/2.CM7yvUKv.js index e9c50aec..72049fa3 100644 --- a/_app/immutable/nodes/2.GgLf60-q.js +++ b/_app/immutable/nodes/2.CM7yvUKv.js @@ -1,6 +1,6 @@ import { s as safe_not_equal, l as assign, n as noop, m as compute_rest_props, p as exclude_internal_props, a as create_slot, u as update_slot_base, g as get_all_dirty_from_scope, b as get_slot_changes } from "../chunks/scheduler.DwkGEAYb.js"; import { S as SvelteComponent, i as init, J as svg_element, K as claim_svg_element, a as children, d as detach, L as set_svg_attributes, g as insert_hydration, F as destroy_each, e as element, s as space, c as claim_element, f as claim_space, m as attr, o as transition_in, p as transition_out, h as append_hydration, y as create_component, k as get_svelte_dataset, z as claim_component, A as mount_component, r as listen, B as destroy_component, q as head_selector } from "../chunks/index.BoYP9eQs.js"; -import { M as Metadata } from "../chunks/Metadata.DnEAGf0u.js"; +import { M as Metadata } from "../chunks/Metadata.Bw4XScZR.js"; import { e as ensure_array_like } from "../chunks/each.CuygwpM4.js"; import { t as theme } from "../chunks/theme.COyUI6g7.js"; function get_spread_update(levels, updates) { diff --git a/_app/immutable/nodes/4.D11J9D_p.js b/_app/immutable/nodes/4.B1e19oMb.js similarity index 99% rename from _app/immutable/nodes/4.D11J9D_p.js rename 
to _app/immutable/nodes/4.B1e19oMb.js index a13a80a9..dbe3c6e3 100644 --- a/_app/immutable/nodes/4.D11J9D_p.js +++ b/_app/immutable/nodes/4.B1e19oMb.js @@ -1,6 +1,6 @@ import { s as safe_not_equal, j as src_url_equal, E as action_destroyer, q as is_function } from "../chunks/scheduler.DwkGEAYb.js"; import { S as SvelteComponent, i as init, y as create_component, s as space, e as element, t as text, n as empty, z as claim_component, f as claim_space, c as claim_element, b as claim_text, m as attr, A as mount_component, g as insert_hydration, u as set_input_value, r as listen, j as set_data, o as transition_in, p as transition_out, d as detach, B as destroy_component, a as children, h as append_hydration } from "../chunks/index.BoYP9eQs.js"; -import { M as Metadata } from "../chunks/Metadata.DnEAGf0u.js"; +import { M as Metadata } from "../chunks/Metadata.Bw4XScZR.js"; function load() { return { title: "Blurhash", diff --git a/_app/version.json b/_app/version.json index 28bd479d..d426dd23 100644 --- a/_app/version.json +++ b/_app/version.json @@ -1 +1 @@ -{"version":"1733962297567"} \ No newline at end of file +{"version":"1734048701097"} \ No newline at end of file diff --git a/adding-devtools-to-vite-plugins.html b/adding-devtools-to-vite-plugins.html index c2744ceb..3ad00413 100644 --- a/adding-devtools-to-vite-plugins.html +++ b/adding-devtools-to-vite-plugins.html @@ -1,4 +1,4 @@ - Adding Devtools to Vite plugins

Adding Devtools to Vite plugins

One of my favorite features in any framework is the Svelte Inspector. It allows you to click on a component and then it magically opens the relevant file in your editor.

In order to accomplish this, without the user’s having to do additional setup, they have to inject their devtool code into the browser during development. Today we will learn how to do that, so that you too can build great devtools!

Getting a Foothold - Injecting JS into the Browser

The key is to inject code into vite’s client side entry point. This is surprisingly straight forward since a vite-plugin can just modify any js file using the transform hook.

/** @returns {import('vite').Plugin} */
+                     Adding Devtools to Vite plugins    

Adding Devtools to Vite plugins

One of my favorite features in any framework is the Svelte Inspector. It allows you to click on a component and then it magically opens the relevant file in your editor.

In order to accomplish this, without the user’s having to do additional setup, they have to inject their devtool code into the browser during development. Today we will learn how to do that, so that you too can build great devtools!

Getting a Foothold - Injecting JS into the Browser

The key is to inject code into vite’s client side entry point. This is surprisingly straight forward since a vite-plugin can just modify any js file using the transform hook.

/** @returns {import('vite').Plugin} */
 const const devtoolsPlugin: () => import('vite').Plugin
@returns
devtoolsPlugin
= () => ({
OutputPlugin.name: stringname: "devtools", Plugin<any>.enforce?: "pre" | "post" | undefined
Enforce plugin invocation tier similar to webpack loaders. @@ -264,4 +264,4 @@ return { code: stringcode: code: anycode + "\n" + `import("${MAGIC_MODULE_ID}/entry.js").then(module => module.bootstrap())` } } } -})

In Conclusion

It’s not hard, but it’s a hassle. Fortunately, you only need to do this once.

\ No newline at end of file +})

In Conclusion

It’s not hard, but it’s a hassle. Fortunately, you only need to do this once.

\ No newline at end of file diff --git a/blurhash.html b/blurhash.html index a4b5743d..1b594265 100644 --- a/blurhash.html +++ b/blurhash.html @@ -1 +1 @@ - Blurhash L36@sCAb0g|;%MRkWCoe01,.~A69
big

background-image:linear-gradient(#211,#321,#322,#322,#322,#322,#211,#210),linear-gradient(#211,#221,#222,#233,#233,#222,#221,#210),linear-gradient(#211,#222,#233,#233,#244,#233,#222,#111),linear-gradient(#211,#222,#333,#444,#444,#344,#333,#211),linear-gradient(#321,#322,#443,#544,#654,#543,#432,#210),linear-gradient(#321,#422,#542,#643,#653,#643,#532,#310),linear-gradient(#321,#322,#532,#543,#643,#532,#421,#310),linear-gradient(#321,#322,#322,#333,#333,#332,#221,#200);background-position:0 0,14.3% 0,28.6% 0,42.9% 0,57.1% 0,71.4% 0,85.7% 0,100% 0;background-size:12.5% 100%;background-repeat:no-repeat;box-shadow:0 0 0 10000px #332;filter:blur(60px);clip-path:inset(0);

\ No newline at end of file + Blurhash L36@sCAb0g|;%MRkWCoe01,.~A69
big

background-image:linear-gradient(#211,#321,#322,#322,#322,#322,#211,#210),linear-gradient(#211,#221,#222,#233,#233,#222,#221,#210),linear-gradient(#211,#222,#233,#233,#244,#233,#222,#111),linear-gradient(#211,#222,#333,#444,#444,#344,#333,#211),linear-gradient(#321,#322,#443,#544,#654,#543,#432,#210),linear-gradient(#321,#422,#542,#643,#653,#643,#532,#310),linear-gradient(#321,#322,#532,#543,#643,#532,#421,#310),linear-gradient(#321,#322,#322,#333,#333,#332,#221,#200);background-position:0 0,14.3% 0,28.6% 0,42.9% 0,57.1% 0,71.4% 0,85.7% 0,100% 0;background-size:12.5% 100%;background-repeat:no-repeat;box-shadow:0 0 0 10000px #332;filter:blur(60px);clip-path:inset(0);

\ No newline at end of file diff --git a/building-an-i18n-library-for-the-modern-web.html b/building-an-i18n-library-for-the-modern-web.html index cfb936de..16f70d27 100644 --- a/building-an-i18n-library-for-the-modern-web.html +++ b/building-an-i18n-library-for-the-modern-web.html @@ -1,8 +1,8 @@ - Building an i18n library for the modern Web

Building an i18n library for the modern Web

Over the last few years we’ve seen the emergence of “partial hydration” patterns across many frameworks. The idea is that most rendering happens on the server, with only the interactive parts of a page actually shipping code to the client. The client and server cooperate to show a user a site. This idea has come in many iterations, be it React Server Components, Astro Islands, or even HTMX.

This has some interesting implications for i18n libraries.

The Status Quo

Most i18n libraries are still conceptualised as monoliths that do all the work in the same place. Language Detection, Message Fallbacks, Lazy Loading & so many more features. However, doing all the work in one place usually means doing it twice! Once on the server and again on the client. This has resulted in some truly impressive bundle sizes. i18next, one of the most popular i18n libraries needs over 40kB to render a single message. This is after bundling.

Clearly there is a lot of room for improvement.

A modern i18n library

What would an i18n library look like that embraces the cooperation between Server and Client, that’s built for partial hydration?

That’s exactly what we tried to accomplish with ParaglideJS

Paraglide is a compiler that compiles your messages into JS modules. Each message is it’s own export.

// @filename: paraglide/messages.js
+                     Building an i18n library for the modern Web    

Building an i18n library for the modern Web

Over the last few years we’ve seen the emergence of “partial hydration” patterns across many frameworks. The idea is that most rendering happens on the server, with only the interactive parts of a page actually shipping code to the client. The client and server cooperate to show a user a site. This idea has come in many iterations, be it React Server Components, Astro Islands, or even HTMX.

This has some interesting implications for i18n libraries.

    1. Since the server-rendered and client-rendered parts always share the same language, language state is global. The only way to switch languages is to rerender everything, including the server-rendered parts, which can only be done by fully reloading the page. Thus, any form of message reactivity or language lazy-loading is useless.
    1. Most Translations are rendered on the server & don’t depend on client side state
    • On the server, any i18n library really serves as a templating helper, so they should excel at doing that!
    • Since only a minority of messages will include client side state, the bundle shipped by an i18n library should only include those messages and the code they require.

The Status Quo

Most i18n libraries are still conceptualised as monoliths that do all the work in the same place. Language Detection, Message Fallbacks, Lazy Loading & so many more features. However, doing all the work in one place usually means doing it twice! Once on the server and again on the client. This has resulted in some truly impressive bundle sizes. i18next, one of the most popular i18n libraries needs over 40kB to render a single message. This is after bundling.

Clearly there is a lot of room for improvement.

A modern i18n library

What would an i18n library look like that embraces the cooperation between Server and Client, that’s built for partial hydration?

That’s exactly what we tried to accomplish with ParaglideJS

Paraglide is a compiler that compiles your messages into JS modules. Each message is it’s own export.

// @filename: paraglide/messages.js
 /**
  * @param {{ name: string }} params
  */
 export const greeting = (params) => `Hello ${params.name}`
 
 export const my_other_message = () => `My Other Message`
-// ...

This takes advantage of modern tooling.

  1. TypeScript. Messages are fully type-safe, including any parameters they take. This makes Paraglide a joy to use for templating.
  2. Modern Build tools remove JS code that isn’t used automatically. Because messages are individual JS exports, they can individually be removed if they aren’t used. This automatically only ships messages that are needed on the client. This results in some tiny bundle-sizes, starting as low as 100 bytes.

We can further take advantage of the cooperation between server and client to skip language detection on the client entirely. Because the server already decided which language to render, the client bundle can just read which language was used from the HTML.

Because ParaglideJSis a compiler, fallback messages can be resolved at build time, so no runtime code is needed for that.

So far, this approach is working very well in any partial-hydration setting. However, even in frameworks without partial hydration ParaglideJS can still be useful. It still only ships messages that are used on a given page without you needing to manually split messages into namespaces as you usually would.

Conclusion

Going forward, scaling down and integration with modern tooling is going to be increasingly important for i18n libraries. ParaglideJS is one attempt at this which can be used today. Clearly there is a lot of room for innovation in this space & we’re interested in how it will develop over the next few months and years.

\ No newline at end of file +// ...

This takes advantage of modern tooling.

  1. TypeScript. Messages are fully type-safe, including any parameters they take. This makes Paraglide a joy to use for templating.
  2. Modern Build tools remove JS code that isn’t used automatically. Because messages are individual JS exports, they can individually be removed if they aren’t used. This automatically only ships messages that are needed on the client. This results in some tiny bundle-sizes, starting as low as 100 bytes.

We can further take advantage of the cooperation between server and client to skip language detection on the client entirely. Because the server already decided which language to render, the client bundle can just read which language was used from the HTML.

Because ParaglideJSis a compiler, fallback messages can be resolved at build time, so no runtime code is needed for that.

So far, this approach is working very well in any partial-hydration setting. However, even in frameworks without partial hydration ParaglideJS can still be useful. It still only ships messages that are used on a given page without you needing to manually split messages into namespaces as you usually would.

Conclusion

Going forward, scaling down and integration with modern tooling is going to be increasingly important for i18n libraries. ParaglideJS is one attempt at this which can be used today. Clearly there is a lot of room for innovation in this space & we’re interested in how it will develop over the next few months and years.

\ No newline at end of file diff --git a/declarative-exception-handling.html b/declarative-exception-handling.html index d61409fb..d4bad726 100644 --- a/declarative-exception-handling.html +++ b/declarative-exception-handling.html @@ -1,4 +1,4 @@ - Declarative Exception Handling in JavaScript

Declarative Exception Handling

In this article we will explore a potential remedy to the nightmare that is error handling in JS.

The State of Error Handling in JS

If you have a function that might fail, you would probably do something like this.

let let result: anyresult;
+                     Declarative Exception Handling in JavaScript    

Declarative Exception Handling

In this article we will explore a potential remedy to the nightmare that is error handling in JS.

The State of Error Handling in JS

If you have a function that might fail, you would probably do something like this.

let let result: anyresult;
 try {
   const 
const user: {
     name: string;
@@ -314,4 +314,4 @@
  */
 const raise = (e) => {
   throw e;
-};
\ No newline at end of file +};
\ No newline at end of file diff --git a/dts-buddy-fixes-type-declarations.html b/dts-buddy-fixes-type-declarations.html index 16d94397..523155b0 100644 --- a/dts-buddy-fixes-type-declarations.html +++ b/dts-buddy-fixes-type-declarations.html @@ -1,4 +1,4 @@ - dts-buddy fixes type declarations

DTS-Buddy fixes Type-Declarations

dts-buddy is one of those tools that’s hard to justify without encountering the problem they solve first. Here’s the story of how I ran into it.

If you don’t care why, you can skip past the intro

A horror story about types in subpackages

I’ve been finding myself copying a lot of code between projects. To make this easier, I’ve been building a package where I keep all my commonly used code. Since it includes code from many domains, using subpackages seemed quite natural. @sigrist.dev/framework/pdf for all my PDF-related code, @sigrist.dev/framework/ui for all my UI-related code, and so on.

To keep editing convenient, I opted to use JSDoc types, and generate type-declarations from them.

While doing this, I quickly learned about the pitfalls of using subpackages. Using the TypeScript compiler means I was generating one d.ts file per js file. This caused a problems when importing a subpackage. Whenever I started typing import { the IDE would show me a list of all the types present in the package, including ones that were not meant to be public. This was very irritating.

Another issue I ran into is that go to definition didn’t work. I couldn’t jump to the implementation of a type, because the IDE didn’t know how to map the type-declaration to the actual source code. As the declarations were always colocated with the JS file it wasn’t that bad, but still inconveniet. I often have to glance at the implementation to recall what was going on, since the package isn’t documented well.

Dts-Buddy; The savior

dts-buddy solves all these things in a very clever way. Instead of colocating each type-declaration with the JS file it belongs to, it generates just one .d.ts file for the entire package. This is then referenced by the types field in your package.json. The file contains module declarations for the public interface of the package and it’s subpackages, using the declare module syntax.

Here’s an example output it generated for one of my (private, sorry) packages:

// @filename: dist/types.d.ts
+                     dts-buddy fixes type declarations    

DTS-Buddy fixes Type-Declarations

dts-buddy is one of those tools that’s hard to justify without encountering the problem they solve first. Here’s the story of how I ran into it.

If you don’t care why, you can skip past the intro

A horror story about types in subpackages

I’ve been finding myself copying a lot of code between projects. To make this easier, I’ve been building a package where I keep all my commonly used code. Since it includes code from many domains, using subpackages seemed quite natural. @sigrist.dev/framework/pdf for all my PDF-related code, @sigrist.dev/framework/ui for all my UI-related code, and so on.

To keep editing convenient, I opted to use JSDoc types, and generate type-declarations from them.

While doing this, I quickly learned about the pitfalls of using subpackages. Using the TypeScript compiler means I was generating one d.ts file per js file. This caused a problems when importing a subpackage. Whenever I started typing import { the IDE would show me a list of all the types present in the package, including ones that were not meant to be public. This was very irritating.

Another issue I ran into is that go to definition didn’t work. I couldn’t jump to the implementation of a type, because the IDE didn’t know how to map the type-declaration to the actual source code. As the declarations were always colocated with the JS file it wasn’t that bad, but still inconveniet. I often have to glance at the implementation to recall what was going on, since the package isn’t documented well.

Dts-Buddy; The savior

dts-buddy solves all these things in a very clever way. Instead of colocating each type-declaration with the JS file it belongs to, it generates just one .d.ts file for the entire package. This is then referenced by the types field in your package.json. The file contains module declarations for the public interface of the package and it’s subpackages, using the declare module syntax.

Here’s an example output it generated for one of my (private, sorry) packages:

// @filename: dist/types.d.ts
 declare module '@sigrist.dev/framework/pdf' {
     /**
     * Adds a QR-ESR Invoice footer to the given PDF.
@@ -62,4 +62,4 @@
 			"import": "./src/ui/index.js"
 		}
 	}
-}

That’s it! Now you can run npm run build and it’ll generate a single type-declaration file (+map) for your entire package.

Should you use it?

dts-buddy is a tool that solves the subpackage-problem very very well. Outside of that, the regular TypeScript compiler is good enough. It’s going to be more familiar to most developers and is maintained more actively. But when you do need dts-buddy, it’s a lifesaver.

I for one have really enjoyed it and am very likely to choose it again.

\ No newline at end of file +}

That’s it! Now you can run npm run build and it’ll generate a single type-declaration file (+map) for your entire package.

Should you use it?

dts-buddy is a tool that solves the subpackage-problem very very well. Outside of that, the regular TypeScript compiler is good enough. It’s going to be more familiar to most developers and is maintained more actively. But when you do need dts-buddy, it’s a lifesaver.

I for one have really enjoyed it and am very likely to choose it again.

\ No newline at end of file diff --git a/fluent-inputs.html b/fluent-inputs.html index 86ebb734..2803f73b 100644 --- a/fluent-inputs.html +++ b/fluent-inputs.html @@ -1,4 +1,4 @@ - Fluent Inputs

Fluent Inputs

A few months ago, I stumbled across Adam Silver’s article Designing A Time Input. In it he very persuasively argued that the ideal time input should be a single text input that accepts a wide range of formats. I followed his advice in a few projects and was very pleased with the results.

Down the Rabbit Hole and to the Prototype

When I at some point needed a datetime input I decided to reuse the idea. A text field that allows all kinds of formats. But why just “12. July 2023”, why not “Tomorrow” or “Next Monday”? So I also added some special cases. But the slippery slope only started there. What about “Thursday in two weeks at noon”? My ambition soon exceeded my ability and patience, so I started looking for libraries.

I found chrono-node which promised to do what I wanted. I used it to build the following prototype. Try it!

Please enter a time and date

It’s functional, but there are some issues I observed while testing it on a few people.

  1. Users don’t trust that the input will understand them. They wait for feedback before moving on.
  2. Results are often lost while typing. Typing “Mon” will be recognised as Monday, but “Mond” won’t be recognised anymore, even though the user is likely to continue typing “Monday”.
  3. It fills in the blanks too liberally. If you say “In two days” it will fill in a time. This caused several users to enter incorrect values.
  4. Confusingly inconsistent. Some basic instructions are not recognised. “On the 12th” is not recognised, but “On the 12th of July” is.
  5. Typing on mobile is annoying.

These problems are a mix of UX and implementation problems. They will all need to be fixed. But the idea did show some promise. There are some obvious advantages.

  1. Very expressive. The gap between what the user is thinking and what they need to type is much smaller.
  2. Works without JavaScript - Parsing can be done on the server as fallback (intentionally disabled in this demo).
  3. Dictation is easy. Dictation is becoming more and more popular, especially among former iPad babies that learned to browse YouTube before they learned to write.
  4. Accessible. The input is read in a very natural way by screen readers.

While I wouldn’t use the above input anywhere, it’s a fine starting point to explore the idea of inputs that accept plain English. These are sometimes called “Natural Language Inputs”, but that could also refer to this kind of input. To avoid confusion, I’ll coin and use the term “Fluent Inputs”. (obligatory xkcd 927)

Doing some Experiments

For now, I’m going to handwave away the parsing problem and focus on the UX. I have a few ideas that I want to test.

User Feedback Testing

TODO

Fluent Inputs as Infodumps

One idea I had is that a Text-Area could be used as an infodump, substituting a whole form. If the user knew what information was expected they could just type it in in a stream of consciousness style. This might be useful for things like calendars, where the expected information is known, and the details might be copy-pasted from an email or website.

Let’s test that.

TODO

The Implementation Problem

I’ve side-stepped the actual implementation of the parser until now. That’s because I don’t have a good solution yet.

We have all the usual i18n problems. Different languages, different cultural expectations. These are always hard problems, but they are even harder to get right here.

TODO

What about LLMs?

Language Models are all the rage right now. They are very good at parsing natural language, so why not use them here?

Well, they do have some limitations that (currently) hold them back from reliably parsing user inputs.

  1. They are slow. Ideally we would want to parse the input on every keystroke. Current LLMs aren’t fast enough for that.
  2. Can’t run in the browser. Any parsing would need to happen on an external LLM server. This causes latency issues and privacy concerns (since the state is sent on each keystroke, not just at the end).
  3. Not repeatable. The same input may be parsed differently on the client and on the server.
  4. They are (currently) bad at dates. If you tell it “in four days”, it would need to both know the current date and be able to calculate the date four days from now. LLMs currently suck at math. Clockface math is even more confusing. This does not stop them from confidently giving you a wrong answer though.

However, as LLMs get better and more widely available, they might become the best solution eventually. Alternative parsers get increasingly hard to write as the complexity of the input increases. LLMs handle that better.

In the meantime, there is a halfway solution. Instead of using the LLM itself as the parser, you could use it as a preprocessor to extract easier to digest strings from the input. This would allow you to use simpler parsers to get the actual data. The LLM doesn’t have to do any logic, just language-manipulation. This is an easier job, so faster and cheaper models can be used.

Here is an example of what that might look like. The input is first passed through a LLM to extract the relevant strings. Then a simple parser is used to parse the extracted strings.

Here is an example prompt and the extracted strings.

The following text includes information about a calendar event.
+                      Fluent Inputs    

Fluent Inputs

A few months ago, I stumbled across Adam Silver’s article Designing A Time Input. In it he very persuasively argued that the ideal time input should be a single text input that accepts a wide range of formats. I followed his advice in a few projects and was very pleased with the results.

Down the Rabbit Hole and to the Prototype

When I at some point needed a datetime input I decided to reuse the idea. A text field that allows all kinds of formats. But why just “12. July 2023”, why not “Tomorrow” or “Next Monday”? So I also added some special cases. But the slippery slope only started there. What about “Thursday in two weeks at noon”? My ambition soon exceeded my ability and patience, so I started looking for libraries.

I found chrono-node which promised to do what I wanted. I used it to build the following prototype. Try it!

Please enter a time and date

It’s functional, but there are some issues I observed while testing it on a few people.

  1. Users don’t trust that the input will understand them. They wait for feedback before moving on.
  2. Results are often lost while typing. Typing “Mon” will be recognised as Monday, but “Mond” won’t be recognised anymore, even though the user is likely to continue typing “Monday”.
  3. It fills in the blanks too liberally. If you say “In two days” it will fill in a time. This caused several users to enter incorrect values.
  4. Confusingly inconsistent. Some basic instructions are not recognised. “On the 12th” is not recognised, but “On the 12th of July” is.
  5. Typing on mobile is annoying.

These problems are a mix of UX and implementation problems. They will all need to be fixed. But the idea did show some promise. There are some obvious advantages.

  1. Very expressive. The gap between what the user is thinking and what they need to type is much smaller.
  2. Works without JavaScript - Parsing can be done on the server as fallback (intentionally disabled in this demo).
  3. Dictation is easy. Dictation is becoming more and more popular, especially among former iPad babies that learned to browse YouTube before they learned to write.
  4. Accessible. The input is read in a very natural way by screen readers.

While I wouldn’t use the above input anywhere, it’s a fine starting point to explore the idea of inputs that accept plain English. These are sometimes called “Natural Language Inputs”, but that could also refer to this kind of input. To avoid confusion, I’ll coin and use the term “Fluent Inputs”. (obligatory xkcd 927)

Doing some Experiments

For now, I’m going to handwave away the parsing problem and focus on the UX. I have a few ideas that I want to test.

User Feedback Testing

TODO

Fluent Inputs as Infodumps

One idea I had is that a Text-Area could be used as an infodump, substituting a whole form. If the user knew what information was expected they could just type it in in a stream of consciousness style. This might be useful for things like calendars, where the expected information is known, and the details might be copy-pasted from an email or website.

Let’s test that.

TODO

The Implementation Problem

I’ve side-stepped the actual implementation of the parser until now. That’s because I don’t have a good solution yet.

We have all the usual i18n problems. Different languages, different cultural expectations. These are always hard problems, but they are even harder to get right here.

TODO

What about LLMs?

Language Models are all the rage right now. They are very good at parsing natural language, so why not use them here?

Well, they do have some limitations that (currently) hold them back from reliably parsing user inputs.

  1. They are slow. Ideally we would want to parse the input on every keystroke. Current LLMs aren’t fast enough for that.
  2. Can’t run in the browser. Any parsing would need to happen on an external LLM server. This causes latency issues and privacy concerns (since the state is sent on each keystroke, not just at the end).
  3. Not repeatable. The same input may be parsed differently on the client and on the server.
  4. They are (currently) bad at dates. If you tell it “in four days”, it would need to both know the current date and be able to calculate the date four days from now. LLMs currently suck at math. Clockface math is even more confusing. This does not stop them from confidently giving you a wrong answer though.

However, as LLMs get better and more widely available, they might become the best solution eventually. Alternative parsers get increasingly hard to write as the complexity of the input increases. LLMs handle that better.

In the meantime, there is a halfway solution. Instead of using the LLM itself as the parser, you could use it as a preprocessor to extract easier to digest strings from the input. This would allow you to use simpler parsers to get the actual data. The LLM doesn’t have to do any logic, just language-manipulation. This is an easier job, so faster and cheaper models can be used.

Here is an example of what that might look like. The input is first passed through a LLM to extract the relevant strings. Then a simple parser is used to parse the extracted strings.

Here is an example prompt and the extracted strings.

The following text includes information about a calendar event.
 
 """
 Meeting with John, Conference room 103 tomorrow 12:30 until five past two, bring the report & wish Janine a happy birthday
@@ -18,4 +18,4 @@
 
 You do not need to format your answers in any particular way. 
 The answers can be natural language.
-If the information for one of the questions is not available leave it blank.

This results in the following output (markdown table)

Question Answer
What is the title of the event? Meeting with John
What day does it start? Tomorrow
What time does it start? 12:30
What day does it end? Tomorrow
What time does it end? Five past two (2:05)
Where is it? Conference room 103
Additional details (if applicable)? Bring the report & wish Janine a happy birthday

This is already pretty good. We’ve broken down a fairly complex input, which does the job previously done by a whole form, into a few simple strings. We now only have to parse these individual strings, not the whole input.

You could also use the LLM to translate the prompt, making it easier to adapt to different languages.

We needn’t worry about prompt-injection attacks here, since the user is entering data for themselves. Tricking the LLM does not achieve anything here. However, from a security perspective, you should consider the output of the LLM as untrusted, as if it were user input itself.

This still doesn’t solve the latency and privacy problems, but the others are gone. The LLM doesn’t have to do logic, and it’s consistent enough that repeated parsing will (usually) give the same result.

Getting Started Today

There are some great libraries out there that can get you started with Fluent Inputs today. I’ve provided a few examples by category below.

Date/Time

  • Timeliness - A library for parsing dates and times in a variety of formats.
  • Chrono-Node - Similar to Timeliness, but with more features and a larger footprint. Great for appointments.
\ No newline at end of file +If the information for one of the questions is not available leave it blank.

This results in the following output (markdown table)

Question Answer
What is the title of the event? Meeting with John
What day does it start? Tomorrow
What time does it start? 12:30
What day does it end? Tomorrow
What time does it end? Five past two (2:05)
Where is it? Conference room 103
Additional details (if applicable)? Bring the report & wish Janine a happy birthday

This is already pretty good. We’ve broken down a fairly complex input, which does the job previously done by a whole form, into a few simple strings. We now only have to parse these individual strings, not the whole input.

You could also use the LLM to translate the prompt, making it easier to adapt to different languages.

We needn’t worry about prompt-injection attacks here, since the user is entering data for themselves. Tricking the LLM does not achieve anything here. However, from a security perspective, you should consider the output of the LLM as untrusted, as if it were user input itself.

This still doesn’t solve the latency and privacy problems, but the others are gone. The LLM doesn’t have to do logic, and it’s consistent enough that repeated parsing will (usually) give the same result.

Getting Started Today

There are some great libraries out there that can get you started with Fluent Inputs today. I’ve provided a few examples by category below.

Date/Time

  • Timeliness - A library for parsing dates and times in a variety of formats.
  • Chrono-Node - Similar to Timeliness, but with more features and a larger footprint. Great for appointments.
\ No newline at end of file diff --git a/i-started-taking-notes.html b/i-started-taking-notes.html index d11c18da..1aac5dff 100644 --- a/i-started-taking-notes.html +++ b/i-started-taking-notes.html @@ -1 +1 @@ - I've Started Taking Notes

I’ve Started Taking Notes

I have always had a personality that gets sucked in very easily. This is great for learning things, but it can be dangerous. With the endless recommendation algorithms always throwing more stuff at you it’s very easy to waste your time. I used to frequently step away from watching YouTube feeling unsatisfied with how I spent my time.

A habit that has helped me a lot is to always take notes when I’m watching something. The goal isn’t to learn or study, just to stay present. This forces me to make the conscious choice about continuing to watch. If I struggle to write anything down, I should probably do something else.

It takes surprisingly little discipline to do this. It’s actually infuriating to watch something with a notepad open, but without anything worth writing down happening. You will want to stop watching & find something else.

This habit hasn’t reduced the time I spend watching stuff, but it has led me to watch stuff that I find more satisfying. It’s amazing how much high quality stuff there is out there about interesting topics. Beneath all the slop there is stuff that’s worth your time. You just need to force yourself to find it. Taking notes has helped me do that.

\ No newline at end of file + I've Started Taking Notes

I’ve Started Taking Notes

I have always had a personality that gets sucked in very easily. This is great for learning things, but it can be dangerous. With the endless recommendation algorithms always throwing more stuff at you it’s very easy to waste your time. I used to frequently step away from watching YouTube feeling unsatisfied with how I spent my time.

A habit that has helped me a lot is to always take notes when I’m watching something. The goal isn’t to learn or study, just to stay present. This forces me to make the conscious choice about continuing to watch. If I struggle to write anything down, I should probably do something else.

It takes surprisingly little discipline to do this. It’s actually infuriating to watch something with a notepad open, but without anything worth writing down happening. You will want to stop watching & find something else.

This habit hasn’t reduced the time I spend watching stuff, but it has led me to watch stuff that I find more satisfying. It’s amazing how much high quality stuff there is out there about interesting topics. Beneath all the slop there is stuff that’s worth your time. You just need to force yourself to find it. Taking notes has helped me do that.

\ No newline at end of file diff --git a/index.html b/index.html index 1c93f48c..ad9c34f4 100644 --- a/index.html +++ b/index.html @@ -1 +1 @@ - Loris Sigrist

Loris Sigrist

Building fun stuff on the Internet. Occasional blogger, Frequent Nerd.

Building an i18n library for the modern Web

The rise of metaframeworks with partial hydration has raised some interesting challenges for i18n libraries. This article explores how ParaglideJS attempts to tackle them.

Declarative Exception Handling in JavaScript

Handling many different exception types in JavaScript is a pain. In this article we will play around with a declarative approach to exception handling.

Adding Devtools to Vite plugins

Many frontend frameworks and tools come in the form of Vite-plugins. Here is how plugin authors can inject devtools into the browser during development.

A neat Pattern for Melt UI

Sometimes I want to share behavior of my Melt UI based components with their children. I've found a neat pattern to do so.

The poor man's scheduled post

If you build your site on github pages or netlify, you don't have a server to schedule posts. This article gets around that by using a github action to periodically redeploy your site.

dts-buddy fixes type declarations

dts-buddy is a bundler for type-definitions. It helps sidestep the most common issues with type definitions.

Reliably Avoiding Theme flashes

On sites with a theme-toggle, flashing the default theme before the user's choice is loaded is a common problem. Here's how to avoid it client-side only.

The better way to load data

When loading data, we usually fetch, wait for the response and then render the page. If the fetch is slow, our page is slow. But what if we could render the page while the data is still loading? This is what this article is about.

Never write Mock Data again, with Zocker

Over the last couple of weeks I've been building a library to generate mock data for my projects. Today I'm happy to announce the first stable release of Zocker, a Schema first Mock Data Generator.

\ No newline at end of file + Loris Sigrist

Loris Sigrist

Building fun stuff on the Internet. Occasional blogger, Frequent Nerd.

Building an i18n library for the modern Web

The rise of metaframeworks with partial hydration has raised some interesting challenges for i18n libraries. This article explores how ParaglideJS attempts to tackle them.

Declarative Exception Handling in JavaScript

Handling many different exception types in JavaScript is a pain. In this article we will play around with a declarative approach to exception handling.

Adding Devtools to Vite plugins

Many frontend frameworks and tools come in the form of Vite-plugins. Here is how plugin authors can inject devtools into the browser during development.

A neat Pattern for Melt UI

Sometimes I want to share behavior of my Melt UI based components with their children. I've found a neat pattern to do so.

The poor man's scheduled post

If you build your site on github pages or netlify, you don't have a server to schedule posts. This article gets around that by using a github action to periodically redeploy your site.

dts-buddy fixes type declarations

dts-buddy is a bundler for type-definitions. It helps sidestep the most common issues with type definitions.

Reliably Avoiding Theme flashes

On sites with a theme-toggle, flashing the default theme before the user's choice is loaded is a common problem. Here's how to avoid it client-side only.

The better way to load data

When loading data, we usually fetch, wait for the response and then render the page. If the fetch is slow, our page is slow. But what if we could render the page while the data is still loading? This is what this article is about.

Never write Mock Data again, with Zocker

Over the last couple of weeks I've been building a library to generate mock data for my projects. Today I'm happy to announce the first stable release of Zocker, a Schema first Mock Data Generator.

\ No newline at end of file diff --git a/melt-ui-neat-pattern.html b/melt-ui-neat-pattern.html index 7d3280c5..9a6c9655 100644 --- a/melt-ui-neat-pattern.html +++ b/melt-ui-neat-pattern.html @@ -1,4 +1,4 @@ - A neat Pattern for Melt UI

A neat pattern for Melt UI

While working with Melt UI, I’ve stumbled upon a pattern that has been quite handy several times. Sharing stuff from the same builder between components using the let: directive. Here is an example:

I often find myself in need of a generic Tooltip component, one where the Trigger is some outside object. Since I use Melt UI for my projects anyway, I really want to use its Tooltip builder. However, it’s not immediately obvious how you can do that. Melt UI’s docs always have one component that fully encapsulates all the behavior.

But it’s actually quite doable thanks to the let: directive. You can use it to pass the Tooltip’s trigger prop to the outside world. Ideally this would be as simple as:

<!--Tooltip.svelte-->
+                     A neat Pattern for Melt UI    

A neat pattern for Melt UI

While working with Melt UI, I’ve stumbled upon a pattern that has been quite handy several times. Sharing stuff from the same builder between components using the let: directive. Here is an example:

I often find myself in need of a generic Tooltip component, one where the Trigger is some outside object. Since I use Melt UI for my projects anyway, I really want to use its Tooltip builder. However, it’s not immediately obvious how you can do that. Melt UI’s docs always have one component that fully encapsulates all the behavior.

But it’s actually quite doable thanks to the let: directive. You can use it to pass the Tooltip’s trigger prop to the outside world. Ideally this would be as simple as:

<!--Tooltip.svelte-->
 <script>
     import { createTooltip } from '@melt-ui/svelte';
 
@@ -44,4 +44,4 @@
 
 <Tooltip let:triggerAction let:triggerProps>
   <button use:triggerAction {...triggerProps}>Do Something</button>
-</Tooltip>

That’s it.

This pattern has been useful to me a couple of times, so I wanted to share.

\ No newline at end of file +</Tooltip>

That’s it.

This pattern has been useful to me a couple of times, so I wanted to share.

\ No newline at end of file diff --git a/mock-data-with-zocker.html b/mock-data-with-zocker.html index 24ca492f..8575c2df 100644 --- a/mock-data-with-zocker.html +++ b/mock-data-with-zocker.html @@ -1,4 +1,4 @@ - Never write Mock Data again, with Zocker

Never write Mock-Data again, with Zocker

The trend of zod-driven-development continues! This time, we’re going to use zod to generate sensible mock-data for our tests.

Writing Mock Data is the worst

When writing tests, you often need to provide some mock-data to test your code against. This can be a real pain, especially if you need lots of it, and if it’s complex.

Most mock-data generation libraries, such as the excellent faker, supply only individual fields, not entire data-structures.

Manually assembling these fields into a data-structure is tedious, and maintenance-heavy.

Enter Zocker

Zocker is a library I’ve built to forever eliminate the pain of writing and maintaining mock-data. It uses your zod-schemas as a guide to generate sensible and realistic mock-data for you. This way you can focus on writing tests, not on writing mock-data. Data generation does not get harder if you need more data, or if your data gets more complex. It’s all handled for you.

Getting Started

Obviously, install it first:

npm install --save-dev zocker

Then, in your test-file, import the zocker function and pass it your zod-schema:

import { import zz } from 'zod';
+                     Never write Mock Data again, with Zocker    

Never write Mock-Data again, with Zocker

The trend of zod-driven-development continues! This time, we’re going to use zod to generate sensible mock-data for our tests.

Writing Mock Data is the worst

When writing tests, you often need to provide some mock-data to test your code against. This can be a real pain, especially if you need lots of it, and if it’s complex.

Most mock-data generation libraries, such as the excellent faker, supply only individual fields, not entire data-structures.

Manually assembling these fields into a data-structure is tedious, and maintenance-heavy.

Enter Zocker

Zocker is a library I’ve built to forever eliminate the pain of writing and maintaining mock-data. It uses your zod-schemas as a guide to generate sensible and realistic mock-data for you. This way you can focus on writing tests, not on writing mock-data. Data generation does not get harder if you need more data, or if your data gets more complex. It’s all handled for you.

Getting Started

Obviously, install it first:

npm install --save-dev zocker

Then, in your test-file, import the zocker function and pass it your zod-schema:

import { import zz } from 'zod';
 import { function zocker<Z extends z.ZodType<any, z.ZodTypeDef, any>>(schema: Z): Zocker<Z>zocker } from 'zocker';
 
 const 
const schema: z.ZodObject<{
@@ -277,4 +277,4 @@
     age: number;
 }
generate
();
-// { name: "Jimmy Smith", age: 42 } - The name is now fixed

Limitations

There are a few limitations though. Zocker will never be able to generate data for preprocess or refinement functions. At least not out of the box. We can however supply our own values for those (sub)schemas, and side-step the issue.

Repeatability

By default, zocker will generate a new random value for each schema. This is great for most cases, but can lead to flaky tests if you’re not careful. If you want to generate the same data every time, you can set a seed using the setSeed method. This will generate the same data every time.

const const mockData: anymockData = zocker<ZodAny>(schema: ZodAny): Zocker<ZodAny>zocker(const schema: ZodAnyschema).Zocker<ZodAny>.setSeed(seed: number): Zocker<ZodAny>setSeed(42).Zocker<ZodAny>.generate(): anygenerate();

Conclusion

I hope this article has given you a taste of what zocker can do. If you want to learn more, check out the documentation. In my own use, zocker has been a huge time-saver. I hope it can help you too!

\ No newline at end of file +// { name: "Jimmy Smith", age: 42 } - The name is now fixed

Limitations

There are a few limitations though. Zocker will never be able to generate data for preprocess or refinement functions. At least not out of the box. We can however supply our own values for those (sub)schemas, and side-step the issue.

Repeatability

By default, zocker will generate a new random value for each schema. This is great for most cases, but can lead to flaky tests if you’re not careful. If you want to generate the same data every time, you can set a seed using the setSeed method. This will generate the same data every time.

const const mockData: anymockData = zocker<ZodAny>(schema: ZodAny): Zocker<ZodAny>zocker(const schema: ZodAnyschema).Zocker<ZodAny>.setSeed(seed: number): Zocker<ZodAny>setSeed(42).Zocker<ZodAny>.generate(): anygenerate();

Conclusion

I hope this article has given you a taste of what zocker can do. If you want to learn more, check out the documentation. In my own use, zocker has been a huge time-saver. I hope it can help you too!

\ No newline at end of file diff --git a/reliably-avoiding-theme-flashes.html b/reliably-avoiding-theme-flashes.html index 61833e6a..88ca0d18 100644 --- a/reliably-avoiding-theme-flashes.html +++ b/reliably-avoiding-theme-flashes.html @@ -1,4 +1,4 @@ - Reliably Avoiding Theme flashes

Reliably avoiding Theme Flashes

TLDR: Add a non-deferred script tag at the start of the document and react to the user’s theme preference there

A common issue on sites with theme-toggles is a flash of the wrong theme when the page loads. In the hilariously titled post Flash of inAccurate coloR Theme (FART), Chris Coyier coined the term FART to describe this phenomenon. In this post, we will explore the cause of FARTs, and how to avoid them client-side only. Along the way we will be learning about how page-loading works in the browser.

Diagnosing the Problem

HTML is a streaming format. This is great, since we can display the content of a page before it has fully loaded. Even massive sites like the HTML Specification with its 13MB (!) of raw HTML can be displayed almost instantly. This is one of the most underrated features of the web.

But this poses a question when running Javascript. If the page has not fully loaded by the time our code runs, how does a selector like document.getElementById behave? Well, it only gets run on the part that has already been loaded. This is dangerous when trying to hydrate a page, since the elements your code is trying to hydrate might not be there yet. To avoid this, pretty much all sites run their JS after the HTML has been fully received and parsed. We used to do this with DOMContentLoaded or by putting our scripts at the bottom of the page, but today we usually use defer, or type=module on the script tag to achieve this.

There is also the additional problem that non-deferred scripts are render-blocking, meaning that the browser will not render anything that comes after them until they have finished running. Loading your frontend framework like this would completely negate the benefits of SSR or prerendering, the behaviour would be identical to pure client-side-rendering.

For these reasons pretty much all sites load their JS code in a deferred manner.

Unfortunately this causes FARTs. In order to decide which theme we should display, we need to check some sort of local persistence. That might be localStorage, IndexedDB or a plain old cookie. Either way, JS needs to run. If our JS runs after the page has been fully parsed, as is the default nowadays, we get a flash of the default theme. You might get lucky and the browser will run your JS between finishing parsing and rendering, but you can’t rely on that.

The Solution

We need to somehow determine the desired theme before the page gets rendered. The way we do this is by returning to the web-development stone age. The primitive <script> tag with no defer or type="module" attributes will block the page from rendering until it has run. If we put our theme code in such a script we will not get a FART.

Tip: Most projects have a skeleton HTML file somewhere, which is used as a template for all pages. Add the script there.

<!DOCTYPE html>
+                     Reliably Avoiding Theme flashes    

Reliably avoiding Theme Flashes

TLDR: Add a non-deferred script tag at the start of the document and react to the user’s theme preference there

A common issue on sites with theme-toggles is a flash of the wrong theme when the page loads. In the hilariously titled post Flash of inAccurate coloR Theme (FART), Chris Coyier coined the term FART to describe this phenomenon. In this post, we will explore the cause of FARTs, and how to avoid them client-side only. Along the way we will be learning about how page-loading works in the browser.

Diagnosing the Problem

HTML is a streaming format. This is great, since we can display the content of a page before it has fully loaded. Even massive sites like the HTML Specification with its 13MB (!) of raw HTML can be displayed almost instantly. This is one of the most underrated features of the web.

But this poses a question when running Javascript. If the page has not fully loaded by the time our code runs, how does a selector like document.getElementById behave? Well, it only gets run on the part that has already been loaded. This is dangerous when trying to hydrate a page, since the elements your code is trying to hydrate might not be there yet. To avoid this, pretty much all sites run their JS after the HTML has been fully received and parsed. We used to do this with DOMContentLoaded or by putting our scripts at the bottom of the page, but today we usually use defer, or type=module on the script tag to achieve this.

There is also the additional problem that non-deferred scripts are render-blocking, meaning that the browser will not render anything that comes after them until they have finished running. Loading your frontend framework like this would completely negate the benefits of SSR or prerendering, the behaviour would be identical to pure client-side-rendering.

For these reasons pretty much all sites load their JS code in a deferred manner.

Unfortunately this causes FARTs. In order to decide which theme we should display, we need to check some sort of local persistence. That might be localStorage, IndexedDB or a plain old cookie. Either way, JS needs to run. If our JS runs after the page has been fully parsed, as is the default nowadays, we get a flash of the default theme. You might get lucky and the browser will run your JS between finishing parsing and rendering, but you can’t rely on that.

The Solution

We need to somehow determine the desired theme before the page gets rendered. The way we do this is by returning to the web-development stone age. The primitive <script> tag with no defer or type="module" attributes will block the page from rendering until it has run. If we put our theme code in such a script we will not get a FART.

Tip: Most projects have a skeleton HTML file somewhere, which is used as a template for all pages. Add the script there.

<!DOCTYPE html>
 <html>
 	<head>
 		...
@@ -10,4 +10,4 @@
 	<body>
 		<div id="app">...</div>
 	</body>
-</html>

Beware though. Since the HTML is not yet fully parsed when this script runs you only have guaranteed access to the elements that come before it, not after it. In the above example we could not safely add a class to the body, only to the html tag. Any elements that come before the script could potentially be displayed before it has run. You also need to account for that. The best place to put the script is usually either in the head, or as the first thing in the body.

Warning: If the theme logic is more complex, it’s tempting to put the JS in a separate file and load it via src. That would require an additional HTTP request, during which the page cannot be rendered. Just inline it.

There is another benefit to this. Elements with css transitions can often look awkward when switching themes, since they take longer than the rest of the page. This is especially noticeable during FARTs. By running the theme-switching code before the page has been rendered this problem is avoided, since the initial render will already be in the correct theme.

Addressing the concerns

Some developers will avoid blocking scripts like the plague, since they used to be common sources of performance issues. However, in our case, the page cannot safely be rendered before the code has run, so this is an exception. Understanding why something is considered bad practice is key to knowing when it’s okay to break the rules. Think of it like this: The theme-checking code needs to run anyway, so we might as well run it as early as possible. There is no performance loss.

\ No newline at end of file +</html>

Beware though. Since the HTML is not yet fully parsed when this script runs you only have guaranteed access to the elements that come before it, not after it. In the above example we could not safely add a class to the body, only to the html tag. Any elements that come before the script could potentially be displayed before it has run. You also need to account for that. The best place to put the script is usually either in the head, or as the first thing in the body.

Warning: If the theme logic is more complex, it’s tempting to put the JS in a separate file and load it via src. That would require an additional HTTP request, during which the page cannot be rendered. Just inline it.

There is another benefit to this. Elements with css transitions can often look awkward when switching themes, since they take longer than the rest of the page. This is especially noticeable during FARTs. By running the theme-switching code before the page has been rendered this problem is avoided, since the initial render will already be in the correct theme.

Addressing the concerns

Some developers will avoid blocking scripts like the plague, since they used to be common sources of performance issues. However, in our case, the page cannot safely be rendered before the code has run, so this is an exception. Understanding why something is considered bad practice is key to knowing when it’s okay to break the rules. Think of it like this: The theme-checking code needs to run anyway, so we might as well run it as early as possible. There is no performance loss.

\ No newline at end of file diff --git a/tailwind-ui.html b/tailwind-ui.html index 6684bfd6..d8747cf9 100644 --- a/tailwind-ui.html +++ b/tailwind-ui.html @@ -1,4 +1,4 @@ - Is Tailwind UI worth it?
The Tailwind UI Landing Page showing a bunch of beautiful components

Is Tailwind UI worth it?

Tailwind UI is a collection of high-quality components and templates built by the creators of Tailwind CSS. It’s 300$ price-tag for a single developer license is quite steep when considering all the free alternatives out there. Could it really be worth it?

I’ve been using Tailwind UI for a couple months now. In this article, I’ll go through how it has affected my workflow, what I liked and disliked about it, and who should consider buying it.

Since Tailwind UI and Tailwind CSS share similar, verbose names, I’ll refer to them as TUI and TCSS respectively.

The TUI workflow

TUI components and templates are provided as markup with TCSS classes. You then copy-paste the markup into your project. In true Tailwind fashion, this seems a bit backwards at first. Why would you want to copy-paste markup if you could just install a library? Customization. If you’ve ever tried to modify something like MUI you know it’s insufferable. Owning the markup and styles saves you from a lot of headaches.

Let’s say you wanted to add a money input to your site. You would first browse through the TUI components to find one that looks like you want.

$
<div>
+                      Is Tailwind UI worth it?    
The Tailwind UI Landing Page showing a bunch of beautiful components

Is Tailwind UI worth it?

Tailwind UI is a collection of high-quality components and templates built by the creators of Tailwind CSS. It’s 300$ price-tag for a single developer license is quite steep when considering all the free alternatives out there. Could it really be worth it?

I’ve been using Tailwind UI for a couple months now. In this article, I’ll go through how it has affected my workflow, what I liked and disliked about it, and who should consider buying it.

Since Tailwind UI and Tailwind CSS share similar, verbose names, I’ll refer to them as TUI and TCSS respectively.

The TUI workflow

TUI components and templates are provided as markup with TCSS classes. You then copy-paste the markup into your project. In true Tailwind fashion, this seems a bit backwards at first. Why would you want to copy-paste markup if you could just install a library? Customization. If you’ve ever tried to modify something like MUI you know it’s insufferable. Owning the markup and styles saves you from a lot of headaches.

Let’s say you wanted to add a money input to your site. You would first browse through the TUI components to find one that looks like you want.

$
<div>
   <label for="price" class="block text-sm font-medium leading-6 text-gray-900">Price</label>
   <div class="relative mt-2 rounded-md shadow-sm">
     <div class="pointer-events-none absolute inset-y-0 left-0 flex items-center pl-3">
@@ -9,4 +9,4 @@
       <span class="text-gray-500 sm:text-sm" id="price-currency">USD</span>
     </div>
   </div>
-</div>

Then you would copy-paste the markup into your project and modify it to fit your theme, changing colors, shapes, fonts and sizes.

$

For more complex components that require interactivity TUI also provides React and Vue components. If you aren’t using these frameworks, the markup also includes comments that explain how the component should behave. This makes it easy to adapt the components to whatever you’re using.

Looking good has never been this easy

Obviously TUI looks great, you just need to browse their site to see that. What’s more impressive is how easy it is to actually achieve similar results in your own project.

I’m used to Bootstrap, MUI or custom CSS. With these, I would spend a lot of time fiddling to get the look right. Promises of “easy styling” and “beautiful components” would often go unfulfilled.

With TUI, I just copy-paste the markup and it looks great. This effortless beauty has been a novel experience for me.

Framework agnostic*

Components are provided as markup with TCSS classes. The appearance of the components is not tied to any specific framework. However, the behavior of the components needs to be implemented separately for each framework.

TUI provides prebuilt React & Vue components that implement expected behavior. If you are using a different framework or no framework you’ll have to implement the behavior yourself. Fortunately, this is rarely difficult. The markup includes comments that explain what each part is for and how it should behave.

I’ve personally adapted many TUI components to Svelte, using Melt UI as the behavior library. It’s been a very smooth experience.

Even when I was using a Server-Side only framework, Symfony, this still worked great. I feel comfortable calling TUI framework agnostic, even though there is an asterisk.

It’s not perfect

There are things that I find frustrating about TUI and wish I had been aware of these before purchasing. Hopefully they will be addressed in future.

Components come in just one theme

All, or at least most, TUI components come in a single theme. Either light or dark. This is unfortunate, since one of TCSS’s nicest features is its easy multi-theme styling.

It’s especially odd when a component appears in both light and dark across different pages. Clearly both variants exist, so why not make them available?

The thinking here was likely that developers would not expect a component to differ visually from the preview page. If you are building a light-theme site and you copy-paste light theme components, having them suddenly turn dark would be unexpected. This is likely why they decided to make the components single-theme.

I would appreciate components being provided in three variants: light, dark and adaptive.

Tailwind CSS limitations show

TUI has limited itself to just using TCSS classes. This is great, as all components are easy to copy-paste and customize. However, occasionally the limitations of TCSS shine through.

TCSS can do pretty much anything CSS can when styling a single element. However, it does not match CSS when it comes to multi-element interaction and styling, its most powerful feature. TUI has to abstain from this, which results in some awkward recommendations, usually manifesting in an overreliance on JS.

For example, it recommends opening dropdowns by adding/removing an element with JS. That part of the site won’t work without JS. This is a shame, since this could easily be solved with a few lines of CSS. Failing to adhere to the Least Power Principle like this makes your sites more fragile for no benefit.

The solution here isn’t to add custom CSS to TUI, but to expand TCSS’s capabilities. Until that happens I will continue to be frustrated by this.

No Offline Support

I’m currently writing this on a train and I’m unable to access TUI in any way. This is a rather glaring oversight, as it would be very straight forward to enable offline access. A basic ServiceWorker would do. Caching static assets isn’t hard.

Is it worth it?

I have yet to answer the ultimate question: Is it worth it? I would give the usual “it depends”, because it really does depend, but that’s not fair. It’s not fair because you have to buy it to try it and therefore can’t know on which side of the fence you’ll land. That’s no good.

So instead I’ll give a less correct but more actionable answer. Yes, it’s worth it, unless you have a reason it isn’t.

The amount of time it has saved me, even over the ostensibly many free alternatives, has been worth the price. I’ve been able to build beautiful sites with minimal effort. I’ve been able to focus on shipping, not fiddling. I believe most developers would experience these benefits if they were to buy it, and it would be worth it for them. The only reason I can think of for it not being worth it is if you have a very specific design system that TUI doesn’t match. Or if you aren’t getting paid, then saving time isn’t very valuable.

\ No newline at end of file +</div>

Then you would copy-paste the markup into your project and modify it to fit your theme, changing colors, shapes, fonts and sizes.

$

For more complex components that require interactivity TUI also provides React and Vue components. If you aren’t using these frameworks, the markup also includes comments that explain how the component should behave. This makes it easy to adapt the components to whatever you’re using.

Looking good has never been this easy

Obviously TUI looks great, you just need to browse their site to see that. What’s more impressive is how easy it is to actually achieve similar results in your own project.

I’m used to Bootstrap, MUI or custom CSS. With these, I would spend a lot of time fiddling to get the look right. Promises of “easy styling” and “beautiful components” would often go unfulfilled.

With TUI, I just copy-paste the markup and it looks great. This effortless beauty has been a novel experience for me.

Framework agnostic*

Components are provided as markup with TCSS classes. The appearance of the components is not tied to any specific framework. However, the behavior of the components needs to be implemented separately for each framework.

TUI provides prebuilt React & Vue components that implement expected behavior. If you are using a different framework or no framework you’ll have to implement the behavior yourself. Fortunately, this is rarely difficult. The markup includes comments that explain what each part is for and how it should behave.

I’ve personally adapted many TUI components to Svelte, using Melt UI as the behavior library. It’s been a very smooth experience.

Even when I was using a Server-Side only framework, Symfony, this still worked great. I feel comfortable calling TUI framework agnostic, even though there is an asterisk.

It’s not perfect

There are things that I find frustrating about TUI and wish I had been aware of these before purchasing. Hopefully they will be addressed in future.

Components come in just one theme

All, or at least most, TUI components come in a single theme. Either light or dark. This is unfortunate, since one of TCSS’s nicest features is its easy multi-theme styling.

It’s especially odd when a component appears in both light and dark across different pages. Clearly both variants exist, so why not make them available?

The thinking here was likely that developers would not expect a component to differ visually from the preview page. If you are building a light-theme site and you copy-paste light theme components, having them suddenly turn dark would be unexpected. This is likely why they decided to make the components single-theme.

I would appreciate components being provided in three variants: light, dark and adaptive.

Tailwind CSS limitations show

TUI has limited itself to just using TCSS classes. This is great, as all components are easy to copy-paste and customize. However, occasionally the limitations of TCSS shine through.

TCSS can do pretty much anything CSS can when styling a single element. However, it does not match CSS when it comes to multi-element interaction and styling, its most powerful feature. TUI has to abstain from this, which results in some awkward recommendations, usually manifesting in an overreliance on JS.

For example, it recommends opening dropdowns by adding/removing an element with JS. That part of the site won’t work without JS. This is a shame, since this could easily be solved with a few lines of CSS. Failing to adhere to the Least Power Principle like this makes your sites more fragile for no benefit.

The solution here isn’t to add custom CSS to TUI, but to expand TCSS’s capabilities. Until that happens I will continue to be frustrated by this.

No Offline Support

I’m currently writing this on a train and I’m unable to access TUI in any way. This is a rather glaring oversight, as it would be very straight forward to enable offline access. A basic ServiceWorker would do. Caching static assets isn’t hard.

Is it worth it?

I have yet to answer the ultimate question: Is it worth it? I would give the usual “it depends”, because it really does depend, but that’s not fair. It’s not fair because you have to buy it to try it and therefore can’t know on which side of the fence you’ll land. That’s no good.

So instead I’ll give a less correct but more actionable answer. Yes, it’s worth it, unless you have a reason it isn’t.

The amount of time it has saved me, even over the ostensibly many free alternatives, has been worth the price. I’ve been able to build beautiful sites with minimal effort. I’ve been able to focus on shipping, not fiddling. I believe most developers would experience these benefits if they were to buy it, and it would be worth it for them. The only reason I can think of for it not being worth it is if you have a very specific design system that TUI doesn’t match. Or if you aren’t getting paid, then saving time isn’t very valuable.

\ No newline at end of file diff --git a/the-better-way-to-load-data.html b/the-better-way-to-load-data.html index 04098699..394d404a 100644 --- a/the-better-way-to-load-data.html +++ b/the-better-way-to-load-data.html @@ -1,4 +1,4 @@ - The better way to load data

The Better Way to load data

When we are loading lists in our web-apps, we usually do the following. Our app makes a fetch request to a server, waits for all the data to arrive, maybe the app validates it, and then displays the items.

import { 
const TodosSchema: z.ZodArray<z.ZodObject<{
+                     The better way to load data    

The Better Way to load data

When we are loading lists in our web-apps, we usually do the following. Our app makes a fetch request to a server, waits for all the data to arrive, maybe the app validates it, and then displays the items.

import { 
const TodosSchema: z.ZodArray<z.ZodObject<{
     description: z.ZodString;
     done: z.ZodBoolean;
 }, "strip", z.ZodTypeAny, {
@@ -336,4 +336,4 @@
     description: string;
     done: boolean;
 }
todo
);
-}

A few observations to close out.

  1. On slow connections, the Streaming version is both faster to show stuff to the user and also finishes earlier, since the parsing and validation happen in parallel with the fetching. On fast connections, the performance difference is negligible.
  2. Once the MapStream and asIterable helpers are defined, the streaming version of the code isn’t meaningfully longer. The effort for both versions is about the same.
  3. The bundle size for the streaming versions is slightly larger than the non-streaming version since we need to ship the JSONParser (+20kB). This isn’t always worth it. On sites with long session times it likely is worth it, since the extra code is only sent once and every subsequent request can be sped up. In PWAs, where your code is already cached on the client, streaming is a no brainer.

There is a lot more you can do with streams, I really encourage you to play around with them. They’re a really powerful idea that applies to much more than just data-fetching. I hope you’ve learned something and have a good day.

\ No newline at end of file +}

A few observations to close out.

  1. On slow connections, the Streaming version is both faster to show stuff to the user and also finishes earlier, since the parsing and validation happen in parallel with the fetching. On fast connections, the performance difference is negligible.
  2. Once the MapStream and asIterable helpers are defined, the streaming version of the code isn’t meaningfully longer. The effort for both versions is about the same.
  3. The bundle size for the streaming versions is slightly larger than the non-streaming version since we need to ship the JSONParser (+20kB). This isn’t always worth it. On sites with long session times it likely is worth it, since the extra code is only sent once and every subsequent request can be sped up. In PWAs, where your code is already cached on the client, streaming is a no brainer.

There is a lot more you can do with streams, I really encourage you to play around with them. They’re a really powerful idea that applies to much more than just data-fetching. I hope you’ve learned something and have a good day.

\ No newline at end of file diff --git a/the-poor-mans-scheduled-post.html b/the-poor-mans-scheduled-post.html index 56559201..4db44598 100644 --- a/the-poor-mans-scheduled-post.html +++ b/the-poor-mans-scheduled-post.html @@ -1,4 +1,4 @@ - The poor man's scheduled post

The poor man’s scheduled post

While working on this site, I quickly developed the desire to schedule posts. Often I would write a post, and then immediately have another idea for another post. Not wanting to publish them all at once, I would often just save the post as a draft and come back to it later. Having a way to schedule posts would be much more convenient.

But, this site is currently (July 2023) hosted on GitHub Pages, which only hosts static sites. This makes it difficult to schedule posts, as there is no server to decide when a post should become available.

The solution

The solution I came up with is quite a brute force one, but it works very well. During the site-build, filter out any posts that have a publish date in the future. Then have a Github Actions cron-job that rebuilds and redeploys the site every day. This will cause the posts to become available on the day they are scheduled to be published.

Doing this is quite straight forward. Go into your GitHub Actions workflow file, and add a schedule trigger. Make it run every day at midnight.

name: Build and Deploy
+                     The poor man's scheduled post    

The poor man’s scheduled post

While working on this site, I quickly developed the desire to schedule posts. Often I would write a post, and then immediately have another idea for another post. Not wanting to publish them all at once, I would often just save the post as a draft and come back to it later. Having a way to schedule posts would be much more convenient.

But, this site is currently (July 2023) hosted on GitHub Pages, which only hosts static sites. This makes it difficult to schedule posts, as there is no server to decide when a post should become available.

The solution

The solution I came up with is quite a brute force one, but it works very well. During the site-build, filter out any posts that have a publish date in the future. Then have a Github Actions cron-job that rebuilds and redeploys the site every day. This will cause the posts to become available on the day they are scheduled to be published.

Doing this is quite straight forward. Go into your GitHub Actions workflow file, and add a schedule trigger. Make it run every day at midnight.

name: Build and Deploy
 on: 
   # The Schedule on which to redeploy the site (every day at midnight)
   schedule:
@@ -11,4 +11,4 @@
 
 jobs:
   build-and-deploy:
-    ... # Your build and deploy steps

This is very wasteful, but with GitHub’s generous free tier you can easily get away with it. It’s also very reliable. I’ve tested this by building an experiment that just prerenders the current date every day. It’s been running for a few months now, and it’s never failed. Thanks to its very fast build times, it has used so little of my free minutes that it doesn’t even show up on my usage graph.

The build-time for this site is a lot longer, at almost two minutes, but that’s still only 60 out of the 2000 free minutes per month. I’m willing to pay that price for the convenience of scheduled posts.

\ No newline at end of file + ... # Your build and deploy steps

This is very wasteful, but with GitHub’s generous free tier you can easily get away with it. It’s also very reliable. I’ve tested this by building an experiment that just prerenders the current date every day. It’s been running for a few months now, and it’s never failed. Thanks to its very fast build times, it has used so little of my free minutes that it doesn’t even show up on my usage graph.

The build-time for this site is a lot longer, at almost two minutes, but that’s still only 60 out of the 2000 free minutes per month. I’m willing to pay that price for the convenience of scheduled posts.

\ No newline at end of file diff --git a/zod-driven-development.html b/zod-driven-development.html index a9c5d835..8a037d22 100644 --- a/zod-driven-development.html +++ b/zod-driven-development.html @@ -1 +1 @@ - Zod Driven Development

Zod Driven Development

Over the last year we’ve been seeing zod, a schema validation library, grow into the lingua franca of many tools. From trpc, to SvelteKit Superforms, even ORMs like drizzle. I myself have contributed to this trend with zocker, a mock-data generator that uses zod schemas to generate mock data.

This got me thinking. How far can I push this? Could I generate an entire app from a zod schema? This article documents my experiment.

The Goalpost and how to get there

Since this is a toy project, I’m going to keep the scope limited to a CRUD app with multiple entities. Let’s build a library app. We’ll have Books, Authors and Publishers. Each book has one author and one publisher. Each author can have multiple books. Each publisher can have multiple books.

We will be using the following tools:

Conclusion

Zod Driven Development has some very obvious strengths. Development speed and maintainability. But, once more complexity gets added, such as authorization, complex business logic, and more complex relationships, it starts to struggle.

Earlier iterations of similar ideas, such as model-driven development, have caused many development failures when the complexity of the projects grew beyond the capabilities of the tool. healthcare.gov is a prime example of this.

Fortunately, ZDD is not an all or nothing approach. You can use this for the trivial parts of your app, and write the more complex parts the way you usually would. It only gets dangerous if your tooling grows more complex than the app itself.

While I likely won’t be generating entire apps like this, I will certainly add some of these tools to my toolkit. The form-generation & validation, the automagic UI, and the mock-data generation have all been fantastic to work with. Drizzle is not quite there yet, but it’s evolving fast and I’m excited to see where it goes.

\ No newline at end of file + Zod Driven Development

Zod Driven Development

Over the last year we’ve been seeing zod, a schema validation library, grow into the lingua franca of many tools. From trpc, to SvelteKit Superforms, even ORMs like drizzle. I myself have contributed to this trend with zocker, a mock-data generator that uses zod schemas to generate mock data.

This got me thinking. How far can I push this? Could I generate an entire app from a zod schema? This article documents my experiment.

The Goalpost and how to get there

Since this is a toy project, I’m going to keep the scope limited to a CRUD app with multiple entities. Let’s build a library app. We’ll have Books, Authors and Publishers. Each book has one author and one publisher. Each author can have multiple books. Each publisher can have multiple books.

We will be using the following tools:

Conclusion

Zod Driven Development has some very obvious strengths. Development speed and maintainability. But, once more complexity gets added, such as authorization, complex business logic, and more complex relationships, it starts to struggle.

Earlier iterations of similar ideas, such as model-driven development, have caused many development failures when the complexity of the projects grew beyond the capabilities of the tool. healthcare.gov is a prime example of this.

Fortunately, ZDD is not an all or nothing approach. You can use this for the trivial parts of your app, and write the more complex parts the way you usually would. It only gets dangerous if your tooling grows more complex than the app itself.

While I likely won’t be generating entire apps like this, I will certainly add some of these tools to my toolkit. The form-generation & validation, the automagic UI, and the mock-data generation have all been fantastic to work with. Drizzle is not quite there yet, but it’s evolving fast and I’m excited to see where it goes.

\ No newline at end of file