Compare commits

...

15 commits
master ... main

Author SHA1 Message Date
Fabien Freling f5a82e8d21 add jujutsu article 2024-11-29 17:42:27 +01:00
Fabien Freling 45ce87e6b1 update article main page generator 2024-11-28 09:47:19 +01:00
Fabien Freling 70005c9228 update mastodon link 2024-11-28 09:46:59 +01:00
Fabien Freling 5066f456f1 add nushell 2024-11-27 15:24:32 +01:00
Fabien Freling ca36f290b4 fix variant path for footer generated file 2024-11-27 10:09:08 +01:00
Fabien Freling 8a84b11503 remove ubuntu setup 2024-11-19 17:44:01 +01:00
Fabien Freling f0af5efd81 add jujutsu 2024-11-19 17:42:21 +01:00
Fabien Freling 6a3440dd1a switch to flake 2024-11-19 15:13:02 +01:00
Fabien Freling c28b99933e update "about" page 2022-07-10 23:45:04 +02:00
Fabien Freling be3345736b add AVIF support 2022-01-13 14:41:39 +01:00
Fabien Freling fa15d60ae1 add nix shell 2022-01-12 16:36:11 +01:00
Fabien Freling eacb2c685b update deploy location 2021-12-29 17:51:19 +01:00
Fabien Freling 6911b53616 add git large files article 2021-12-29 16:02:26 +01:00
Fabien Freling b73a81a129 add horizontal rule 2021-08-31 13:59:09 +02:00
Fabien Freling c6543c3669 update build scripts 2021-08-31 13:58:44 +02:00
18 changed files with 311 additions and 45 deletions

3
.gitignore vendored
View file

@@ -9,3 +9,6 @@ site
graph.png
/preview
.jj
.direnv

View file

@@ -1,15 +1,17 @@
&root = .
&tmpl_dir = ./templates
&gen_tmpl_dir = ./build/templates
HTML_TEMPLATE = main.html
!html = | &(root)/templates/footer.html \
|> ^ html %f^ \
!html = | &(tmpl_dir)/<tmpl_group> \
|> ^b html %f^ \
pandoc --from markdown --to html \
--template=&(root)/templates/$(HTML_TEMPLATE) \
--template=&(tmpl_dir)/$(HTML_TEMPLATE) \
--include-in-header=&(tmpl_dir)/header.html \
--include-before-body=&(tmpl_dir)/nav.html \
--css &(root)/css/style.css \
--include-in-header=&(root)/templates/header.html \
--include-before-body=&(root)/templates/nav.html \
--include-after-body=&(root)/templates/footer.html \
--include-after-body=&(gen_tmpl_dir)/footer.html \
%f | sed 's|%%webRoot%%|&(root)|g' > %o \
|> %B.html
@@ -20,5 +22,13 @@ HTML_TEMPLATE = main.html
!compress_pdf = |> ps2pdf %f %o |>
JPG_OPT = -quality 80 -strip -interlace Plane
!blur_mini = |> convert %f -resize 400x400 -blur 0x8 $(JPG_OPT) %o |> blur_mini.jpg
!thumbnail = |> convert %f -resize 200x200 $(JPG_OPT) %o |>
!blur_mini = |> ^ %f -> blur mini^ convert %f -resize 400x400 -blur 0x8 $(JPG_OPT) %o |> blur_mini.jpg
!thumbnail = |> ^ %f -> thumbnail^ convert %f -resize 200x200 $(JPG_OPT) %o |>
#
# Slides
#
MARP = npx @marp-team/marp-cli@latest
MARP_OPTS = --allow-local-files --bespoke.progress
!marp_pdf = |> $(MARP) $(MARP_OPTS) %f --pdf --output %o |> %B.pdf
!marp_html = |> $(MARP) $(MARP_OPTS) %f --html --output %o |> %B.html

View file

@@ -29,18 +29,15 @@ sail again.
In 2010 I moved to Oslo, Norway, to work as a [Qt](http://www.qt.io/) software
engineer for [Nokia](http://www.nokia.com/) with the great people of Trolltech.
But in the end, Nokia's new direction with Windows Phone didn't suit me, so I
decided to fly back to France.
Damn, I missed croissants.
I learned a lot from such a talented bunch of people working in different fields
(graphics, web, filesystems, etc.).
### 2011 - Computer vision
In 2011 I had the chance to work on computer vision projects. I joined [LTU
technologies](http://www.ltutech.com/) as a research engineer in Paris, France.
I implemented exciting computer vision stuff (image matching, retrieval, etc),
following the state of the art in the field. Unfortunately LTU shut down in
2015.
I implemented exciting computer vision stuff (image matching, retrieval,
similarity, etc), following the state of the art in the field.
### 2015 - Camera software
@@ -52,17 +49,24 @@ working on the firmware level and I learned a ton about photography.
### 2017 - Maps
In 2017 I joined [Zenly](http://zen.ly). We made a social app with a strong
focus on the map, putting the fun back in map apps.
In 2017 I joined [Zenly](http://zen.ly) in Paris. We made a social app
with a strong focus on the map, putting the fun back in map apps. Although it
looked "simple", it involved a lot of technology, with a very custom tech stack.
### 2020 - Renewable energy
In 2020 I joined [MoMA](https://www.momagroup.com/) to work on the
In 2020 I joined [MoMA](https://www.momagroup.com/), again in Paris, to work on the
[E6](https://www.e6-group.com/) project. I felt it was the right time to work on
current matters such as energy.
### 2022 - Smart home
In 2022 I joined [Netatmo](https://www.netatmo.com/) in Boulogne-Billancourt to
work on smart homes. I work in the Vision team, making home cameras smarter. I
am thrilled to work on computer vision again.
<div style="text-align:center; padding-top:2em;">
<a href="https://octodon.social/@ffreling">
<a href="https://mas.to/@ffreling">
<img src="images/mastodon.svg" alt="Mastodon" style="height:40px; padding-inline:5px">
</a>
<a href="http://code.ffreling.com">

View file

@@ -1,5 +1,5 @@
include_rules
HTML_TEMPLATE = article.html
: *.md |> ./generate_listing.sh > %o |> index.md
: *.md |> ./generate_listing.nu > %o |> index.md
: foreach *.md |> !html |>

32
articles/generate_listing.nu Executable file
View file

@@ -0,0 +1,32 @@
#!/usr/bin/env nu
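# Extract the YAML front matter of a markdown file (the lines between the two
# leading '---' markers) and return it as a record of key/value pairs.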
def get_metadata [path: string] {
let lines = open $path | lines
let anchors = $lines | enumerate | filter {|l| ($l.item | str starts-with "---")} | take 2
let header = $lines
| range (($anchors | first | get 'index') + 1)..(($anchors | last | get 'index') - 1)
let metadata = $header | split column -n 2 --regex '\s*:\s*' | rename key value
let record = $metadata | reduce -f {} {|it, acc| $acc | upsert $it.key ($it.value | str trim --char '"') }
$record
}
let pages = (glob *.md) ++ (glob **/index.md)
let sorted_pages = $pages | wrap 'path'
| upsert metadata {|row| (get_metadata $row.path)}
| sort-by --reverse metadata.date
print "---
title: Articles
---
"
let _ = $sorted_pages | each {|p|
let rel_path = $p.path | path relative-to (pwd)
let html_path = $rel_path | path parse --extension md | upsert extension { 'html' } | path join
print --no-newline $"- ($p.metadata.date): [($p.metadata.title)]\(($html_path)\)"
if $p.metadata.update? != null {
print $" \(Updated: ($p.metadata.update)\)"
} else {
print ""
}
}

View file

@@ -1,23 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
cat << EOF
---
title: Articles
---
EOF
listing=""
for file in *.md; do
if [ $file = "index.md" ]; then
continue
fi
link=$(basename $file .md).html
date=$(sed -n 's/date: \(.*\)/\1/p' $file)
title=$(sed -n 's/title: \(.*\)/\1/p' $file)
listing="$listing- $date: [$title]($link)\n"
done
echo -e $listing | sort --reverse

110
articles/git_large_files.md Normal file
View file

@@ -0,0 +1,110 @@
---
title: Git and large files
date: 2021-12-29
---
Git is a cornerstone of software development nowadays: it has become the
de-facto version control system.
Its interface is a bit complex to work with, but a lot of tooling has been
developed over the years that lessens the pain of dealing with it.
One shortcoming of git though (and of version control systems in general) is
dealing with huge binary files. These files are usually media assets that are
not meant to be diffable, but they belong to the project nonetheless.
There are different ways to deal with these files:
### 1. treat them like regular text files
This is the easiest solution: do nothing special. It works perfectly and you
keep a clean history. However, as you modify your assets, the repository size
will grow and it will become slower to clone in your CI pipeline. It will also
put more load on your git server.
### 2. keep them out of your repository
Out of the repository, out of trouble! If you keep your large assets in a
separate directory (Dropbox for instance), your repository will stay light. But
now you need to synchronize your external storage with your repository for your
project. Most of the time, only the latest version is kept around, making it
impossible to inspect an older revision with the appropriate assets.
### 3. store a pointer to external storage
As a compromise, you can store a pointer to external storage in your repository.
Every time you check out a specific revision, you fetch the corresponding data
from external storage and inject it into the project.
Solution 3 is the most convenient: we keep the regular git workflow, and move
the burden of hosting large files out of git itself.
## Git Large File Storage
[Git Large File Storage](https://git-lfs.github.com/) (Git LFS) is the most
widespread implementation of this mechanism. It is developed by GitHub and is
available on all repositories on their platform. It works out of the box: you
set it up once and you can forget about it.
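For instance, a minimal setup could look like this (the tracked pattern and the
asset path are just examples):

```sh
git lfs install                  # enable the LFS filters in your local git config
git lfs track "*.png"            # store PNG files through LFS from now on
git add .gitattributes           # the tracking rules live in this file
git add assets/screenshot.png    # hypothetical large asset
git commit -m "Track images with Git LFS"

# What git actually records for the asset is a small text pointer, roughly:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hash of the real content>
#   size <size in bytes>
```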
However, there are some shortcomings with Git LFS.
### 1. your project is no longer self-contained in git
If you decide to use Git LFS, you will tie your project to the LFS storage
server. You won't be able to walk through your history without a storage
server. GitHub's LFS server implementation is currently closed-source, and only a
"non production ready" reference server is available.
Major hosting platforms have developed their own implementations, and it is
possible to migrate your data between compatible hosting platforms. But your
local copy of the repository will never hold all the data needed for your
project. In a way, the storage server becomes a centralized piece. You can fetch
all the data locally to have it available, but it won't be considered a source;
it is more like a cache.
### 2. you can't easily manage storage in LFS
If you commit a bunch of files, then push your changes, all the files will be
stored on the LFS server. If you want to remove them (e.g. you uploaded unwanted
files), you can do it locally with a rebase, then call `git lfs prune`.
However, that will only clean up your local copies of the files. What has been
pushed will stay on the server.
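That local cleanup could look roughly like this (the rebase depth is arbitrary):

```sh
git rebase -i HEAD~3    # drop or edit the commits that added the unwanted files
git lfs prune           # delete local LFS objects that are no longer referenced
# Note: the copies already pushed to the LFS server are not removed by this.
```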
If you wish to remove files from the server, your options depend on the server
implementation:
- on GitHub, your only option to reclaim LFS quota and truly delete files from
LFS is to [delete and recreate your repository](https://docs.github.com/en/repositories/working-with-files/managing-large-files/removing-files-from-git-large-file-storage#git-lfs-objects-in-your-repository)
- on BitBucket Cloud, you can browse LFS files in the web UI and delete specific
files
## Git Annex
[git-annex](https://git-annex.branchable.com/) is a complete solution to deal
with external files in your git repository. It is also more complex than Git
LFS. As you can see in their
[walkthrough](https://git-annex.branchable.com/walkthrough/), you need to
explicitly set remotes for your files, and sync content between remotes.
Data is shared among local repositories in `.git/annex`, but it won't be
available on common source forges such as GitHub. To make this data available to
everyone in the project, you can use [special
remotes](https://git-annex.branchable.com/special_remotes/), which are used as
data stores, akin to Git LFS (which can actually be used as a special remote).
Contrary to Git LFS, you can see what content is currently
[unused](https://git-annex.branchable.com/walkthrough/unused_data/) and [delete
unwanted files](https://git-annex.branchable.com/tips/deleting_unwanted_files/).
It is a more complex solution, but it is more flexible.
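To give an idea of the extra ceremony involved, a minimal git-annex session
(paths and remote names are made up) might look like:

```sh
git annex init "laptop"                  # turn the repository into an annex
git annex add videos/demo.mp4            # content goes under .git/annex, a symlink is committed
git commit -m "Add demo video via git-annex"
git remote add usbdrive /mnt/usb/website.git   # a second, hypothetical repository
git annex sync --content usbdrive        # sync metadata and push the file content there
git annex whereis videos/demo.mp4        # list which repositories hold the content
```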
## What I recommend
I think git-annex gives users more control over their data: it can be fully
decentralized and offers tools to manage content.
Git LFS is simpler and more widely used, but once you hit one of its limitations,
it can be costly to break free.
## Links
- [Large files with Git: LFS and git-annex](https://lwn.net/Articles/774125/)

6
articles/jujutsu/Tupfile Normal file
View file

@@ -0,0 +1,6 @@
include_rules
HTML_TEMPLATE = article.html
: slides.md |> !marp_pdf |> jujutsu_slides.pdf ./<deps>
: index.md | ./<deps> |> !html |>

40
articles/jujutsu/index.md Normal file
View file

@@ -0,0 +1,40 @@
---
title: "Lightning talk: Jujutsu"
date: 2024-11-26
---
_This article is also available as a lightning talk: [pdf](./jujutsu_slides.pdf)_
## What is it?
[Jujutsu](https://github.com/martinvonz/jj) is a new version control system
(VCS), like git, mercurial, etc. Git is the current gold standard for VCS, even if its
UX could be better. There is a whole ecosystem around git that makes switching
to similar projects (e.g. mercurial) a daunting task. How does Jujutsu plan on
making us switch?
Jujutsu separates the frontend (what the user interacts with) from the
backend (how the information is stored), and the main backend is actually git
repositories. There is a native backend being developed, but it's not ready for
prime time. By sharing its backend with the most popular VCS, Jujutsu aims to
improve on the frontend.
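In practice, that means you can try it on any existing git repository. A quick
sketch (the repository URL is a placeholder, and the CLI is still evolving):

```sh
# Clone through the git backend (hypothetical URL):
jj git clone https://github.com/someuser/somerepo.git
cd somerepo
jj log          # browse revisions layered on top of the git commits

# Or adopt an existing git checkout in place, keeping .git alongside .jj:
jj git init --colocate
```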
## How does it compare?
### No branches, only revisions
Jujutsu understands git commits, but operates at a higher level with revisions.
Revisions wrap commits; however, as you move revisions around (amend them with
changes, or rebase them), they keep their id. Only their underlying commit id changes.
With revision ids being stable, you don't need branches to start working: create
a new revision, and start working. You will need to create a branch (or bookmark,
in Jujutsu parlance) to push your changes, but that can be done at the end.
There is no special mode like git's "detached HEAD": you are always on a revision,
and you can jump around between revisions at any time.
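A rough sketch of that bookmark-at-the-end workflow, using current `jj` commands
(the names are made up):

```sh
jj new -m "fix the frobnicator"       # start a new revision; no branch needed
# ...edit files; jj snapshots the working copy into that revision automatically...
jj describe -m "fix the frobnicator"  # reword the description whenever you like
jj bookmark create my-fix -r @        # only now give it a name, so it can be pushed
jj git push --bookmark my-fix         # publish it to the git remote
```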
## VCS landscape
> I was part of the team at Meta that built Sapling for many years. I'm no
> longer at Meta and I use jj full time.
>
> _[Discussion on Lobste.rs](https://lobste.rs/s/rojoz1/jujutsu_jj_git_compatible_vcs#c_foqya4)_

Binary image file added (not shown in the diff, 7 KiB).

View file

@@ -0,0 +1,23 @@
---
marp: true
theme: gaia
footer: '**Fabien Freling** - 2024-11-26'
paginate: true
---
<style>
section::after {
content: attr(data-marpit-pagination) '/' attr(data-marpit-pagination-total);
}
</style>
<!--
_class: lead
-->
# Jujutsu
Life after Git
---

View file

@@ -1,6 +1,7 @@
---
title: Web stack
date: 2019-06-26
update: 2024-11-26
---
Previous stacks

25
flake.lock Normal file
View file

@@ -0,0 +1,25 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1731890469,
"narHash": "sha256-D1FNZ70NmQEwNxpSSdTXCSklBH1z2isPR84J6DQrJGs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5083ec887760adfe12af64830a66807423a859a7",
"type": "github"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

22
flake.nix Normal file
View file

@@ -0,0 +1,22 @@
{
description = "Website";
outputs = { self, nixpkgs }:
let pkgs = nixpkgs.legacyPackages.x86_64-linux;
in {
devShell.x86_64-linux = with pkgs;
mkShell {
nativeBuildInputs = [
graphviz-nox
imagemagick
just
libavif
libjxl
nodejs
nushell
pandoc
tup
];
};
};
}

View file

@@ -19,7 +19,7 @@ upload-resume:
deploy: build
rsync --checksum --copy-links -ave 'ssh' \
--exclude-from=rsync_excludes.txt \
build/* fabs@ffreling.com:ffreling.com/
build/* fabs@ffreling.com:/var/www/ffreling.com/
preview: build
python3 -m webbrowser -t "file://{{root}}/build/index.html"

12
shell.nix Normal file
View file

@@ -0,0 +1,12 @@
{ pkgs ? import <nixpkgs> {} }:
pkgs.mkShell {
buildInputs = with pkgs; [
imagemagick
just
libavif
nushell
pandoc
tup
];
}

View file

@@ -1 +1 @@
: |> ./generate_footer.sh > %o |> footer.html
: |> ./generate_footer.sh > %o |> footer.html ./<tmpl_group>

View file

@@ -5,3 +5,4 @@
<a href="%webRoot%/articles/index.html">Articles</a>
<a href="%webRoot%/about.html">About</a>
</div>
<hr>