diff --git a/bin/render-streaming-markdown.ts b/bin/render-streaming-markdown.ts index 2a25a0eb7..6649310f8 100644 --- a/bin/render-streaming-markdown.ts +++ b/bin/render-streaming-markdown.ts @@ -2,7 +2,9 @@ // cat README.md | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts // -import $ from "jsr:@david/dax@0.41.0" +import $ from 'jsr:@david/dax@0.41.0' +// todo: replace dax with this: +// import { exec } from 'https://deno.land/std/process/mod.ts'; import { writeAllSync } from 'https://deno.land/std@v0.190.0/streams/mod.ts'; let inputBuffer = "" @@ -10,12 +12,43 @@ let inputBuffer = "" const decoder = new TextDecoder() const encoder = new TextEncoder() +// This style works well for prompt... but not chat for await (const chunk of Deno.stdin.readable) { - const decoded = decoder.decode(chunk); - inputBuffer += decoded - // console.log("$$$$$$$$$$", decoder.decode(chunk), "$$$zzz$$$") + // show immediately, but meanwhile… + writeAllSync(Deno.stdout, chunk); + // Collect it. + inputBuffer += decoder.decode(chunk); +} - // --style auto is there to force it to output styled https://github.com/charmbracelet/glow/blob/2430b0a/main.go#L158 - const output = await $`glow --style auto`.stdinText(decoded).text() +// and now re-render it. +if (inputBuffer) { + console.log('⬇️… and now rendered…⬇️'); + const output = await $`glow --style auto`.stdinText(inputBuffer).text() writeAllSync(Deno.stdout, encoder.encode(output)); } + + +// This is a newline-buffered variant to avoid getting extra newlines in the output because we send it to glow too eagerly +// it works but... the next problem is backtick codeblocks are broken up and... I'm sure there's more.
+// definitely need a better solution + +// let remainingContent = ''; +// for await (const chunk of Deno.stdin.readable) { +// const decoded = remainingContent + decoder.decode(chunk); + +// const lastNewline = decoded.lastIndexOf("\n"); +// if (lastNewline !== -1) { +// // Flush everything up to it +// const output = await $`glow --style auto`.stdinText(decoded.substring(0, lastNewline + 1)).text() +// writeAllSync(Deno.stdout, encoder.encode(output)); + +// // Hold onto the remaining content to flush with the next chunk +// remainingContent = decoded.substring(lastNewline + 1); +// } +// } + +// // Flush any remaining content +// if (remainingContent) { +// const output = await $`glow --style auto`.stdinText(remainingContent).text() +// writeAllSync(Deno.stdout, encoder.encode(output)); +// } diff --git a/fish/aliases.fish b/fish/aliases.fish index 1646552d4..d56e09c3a 100644 --- a/fish/aliases.fish +++ b/fish/aliases.fish @@ -142,10 +142,11 @@ alias update_brew_npm_gem='brew_update; npm install npm -g; npm update -g; sudo abbr gemini "llm -m gemini-1.5-pro-latest" function gemi + # using https://github.com/simonw/llm-gemini and llm if test -n "$argv[1]" - llm prompt -m gemini-1.5-pro-latest $argv[1] | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts + llm prompt -m gemini-1.5-pro-latest $argv[1] | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts else - llm chat --continue -m gemini-1.5-pro-latest | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts + llm chat --continue -m gemini-1.5-pro-latest end end